Mirror of https://github.com/mozilla/treeherder.git

Bug 1823654 - Mostly use double quotes (#7900)

* Enable quote style verifications from Black
* Actually exclude migrations folders

Parent: 20cfe11c5d
Commit: cfb19a5ef8
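Note: Black's "string normalization" is what rewrites the single-quoted literals below to double quotes. A minimal sketch of that behavior (illustrative only, not part of this commit; assumes the black package is installed):

import black

# With skip-string-normalization removed from pyproject.toml, Black's default
# string normalization applies and single quotes become double quotes:
result = black.format_str("x = 'hello'", mode=black.Mode(line_length=100))
print(result)  # x = "hello"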
@@ -22,3 +22,4 @@ repos:
     hooks:
       - id: black
         language_version: python3.9
+        exclude: ^treeherder/.*/migrations
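Note: pre-commit treats `exclude` as a Python regular expression matched against each candidate file path, so the line added above skips every migrations folder under treeherder/. A rough illustration of the matching (not pre-commit's actual implementation):

import re

# Paths matching the exclude pattern are skipped by the hook:
exclude = re.compile(r"^treeherder/.*/migrations")
print(bool(exclude.search("treeherder/model/migrations/0001_initial.py")))  # True
print(bool(exclude.search("treeherder/model/models.py")))  # False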
@@ -6,7 +6,7 @@ import warnings

 # Display deprecation warnings, which are hidden by default:
 # https://docs.python.org/3.7/library/warnings.html#default-warning-filters
-warnings.simplefilter('default', DeprecationWarning)
+warnings.simplefilter("default", DeprecationWarning)

 if __name__ == "__main__":
     os.environ["DJANGO_SETTINGS_MODULE"] = "treeherder.config.settings"
@@ -25,7 +25,7 @@ def main(args):
     production_client = TreeherderClient(server_url=HOSTS["production"])

     # Support comma separated projects
-    projects = args.projects.split(',')
+    projects = args.projects.split(",")
     for _project in projects:
         logger.info("Comparing {} against production.".format(_project))
         # Remove properties that are irrelevant for the comparison
@@ -24,15 +24,7 @@ mdx_truly_sane_lists = { version = "1.3", optional = true }
 [tool.black]
 line-length = 100
 target-version = ['py39']
-skip-string-normalization = true
 include = '\.pyi?$'
-exclude = '''
-/(
-  treeherder/model/migrations
-  | treeherder/perf/migrations
-  | treeherder/changelog/migrations
-)/
-'''

 [tool.ruff]
 # Same as Black.
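Note: Black's own `exclude` option is also a regex, and a multi-line triple-quoted value like the one removed above is compiled in verbose mode (whitespace and newlines ignored), which is why that layout worked. A rough Python equivalent of the old pattern (an assumption about Black's matching, for illustration only):

import re

old_exclude = re.compile(
    r"""
    /(
      treeherder/model/migrations
      | treeherder/perf/migrations
      | treeherder/changelog/migrations
    )/
    """,
    re.VERBOSE,
)
# Black matches paths relative to the project root with a leading slash:
print(bool(old_exclude.search("/treeherder/perf/migrations/0001_initial.py")))  # True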
@@ -41,11 +41,11 @@ def create_failure_lines(job, failure_line_list, start_line=0):
         failure_line = FailureLine(**data)
         job_log = JobLog.objects.create(
             job=job,
-            name='{}{}'.format(base_data.get('test'), job.id),
-            url='bar{}'.format(i),
+            name="{}{}".format(base_data.get("test"), job.id),
+            url="bar{}".format(i),
             status=1,
         )
-        print('create jobLog for job id: {}'.format(job.id))
+        print("create jobLog for job id: {}".format(job.id))
         failure_line.job_log = job_log
         failure_line.save()
         failure_lines.append(failure_line)
@@ -9,37 +9,37 @@ class PerfherderClientTest(unittest.TestCase):
     @responses.activate
     def test_get_performance_signatures(self):
         pc = PerfherderClient()
-        url = pc._get_endpoint_url(pc.PERFORMANCE_SIGNATURES_ENDPOINT, project='mozilla-central')
+        url = pc._get_endpoint_url(pc.PERFORMANCE_SIGNATURES_ENDPOINT, project="mozilla-central")
         content = {
-            'signature1': {'cheezburgers': 1},
-            'signature2': {'hamburgers': 2},
-            'signature3': {'cheezburgers': 2},
+            "signature1": {"cheezburgers": 1},
+            "signature2": {"hamburgers": 2},
+            "signature3": {"cheezburgers": 2},
         }
         responses.add(responses.GET, url, json=content, status=200)

-        sigs = pc.get_performance_signatures('mozilla-central')
+        sigs = pc.get_performance_signatures("mozilla-central")
         self.assertEqual(len(sigs), 3)
-        self.assertEqual(sigs.get_signature_hashes(), ['signature1', 'signature2', 'signature3'])
-        self.assertEqual(sigs.get_property_names(), set(['cheezburgers', 'hamburgers']))
-        self.assertEqual(sigs.get_property_values('cheezburgers'), set([1, 2]))
+        self.assertEqual(sigs.get_signature_hashes(), ["signature1", "signature2", "signature3"])
+        self.assertEqual(sigs.get_property_names(), set(["cheezburgers", "hamburgers"]))
+        self.assertEqual(sigs.get_property_values("cheezburgers"), set([1, 2]))

     @responses.activate
     def test_get_performance_data(self):
         pc = PerfherderClient()

-        url = '{}?{}'.format(
-            pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project='mozilla-central'),
-            'signatures=signature1&signatures=signature2',
+        url = "{}?{}".format(
+            pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project="mozilla-central"),
+            "signatures=signature1&signatures=signature2",
         )
         content = {
-            'signature1': [{'value': 1}, {'value': 2}],
-            'signature2': [{'value': 2}, {'value': 1}],
+            "signature1": [{"value": 1}, {"value": 2}],
+            "signature2": [{"value": 2}, {"value": 1}],
         }
         responses.add(responses.GET, url, json=content, status=200)

         series_list = pc.get_performance_data(
-            'mozilla-central', signatures=['signature1', 'signature2']
+            "mozilla-central", signatures=["signature1", "signature2"]
         )
         self.assertEqual(len(series_list), 2)
-        self.assertEqual(series_list['signature1']['value'], [1, 2])
-        self.assertEqual(series_list['signature2']['value'], [2, 1])
+        self.assertEqual(series_list["signature1"]["value"], [1, 2])
+        self.assertEqual(series_list["signature2"]["value"], [2, 1])
@@ -12,7 +12,7 @@ class TreeherderClientTest(unittest.TestCase):
     @responses.activate
     def test_get_job(self):
         tdc = TreeherderClient()
-        url = tdc._get_endpoint_url(tdc.JOBS_ENDPOINT, project='autoland')
+        url = tdc._get_endpoint_url(tdc.JOBS_ENDPOINT, project="autoland")
         content = {
             "meta": {"count": 3, "repository": "autoland", "offset": 0},
             "results": self.JOB_RESULTS,

@@ -26,7 +26,7 @@ class TreeherderClientTest(unittest.TestCase):
     @responses.activate
     def test_get_pushes(self):
         tdc = TreeherderClient()
-        url = tdc._get_endpoint_url(tdc.PUSH_ENDPOINT, project='autoland')
+        url = tdc._get_endpoint_url(tdc.PUSH_ENDPOINT, project="autoland")
         content = {
             "meta": {"count": 3, "repository": "autoland", "offset": 0},
             "results": self.PUSHES,

@@ -38,5 +38,5 @@ class TreeherderClientTest(unittest.TestCase):
         self.assertEqual(pushes, self.PUSHES)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
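Note: the two client test files above rely on the responses library to intercept HTTP calls made by requests. A self-contained sketch of that pattern (hypothetical URL; assumes requests and responses are installed):

import requests
import responses


@responses.activate
def fetch_example():
    # Register a canned reply; an unregistered request would raise ConnectionError.
    responses.add(responses.GET, "https://example.test/api", json={"ok": 1}, status=200)
    return requests.get("https://example.test/api").json()


print(fetch_example())  # {'ok': 1}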
@@ -27,7 +27,7 @@ from treeherder.services.pulse.exchange import get_exchange
 from treeherder.webapp.api import perfcompare_utils

 IS_WINDOWS = "windows" in platform.system().lower()
-SAMPLE_DATA_PATH = join(dirname(__file__), 'sample_data')
+SAMPLE_DATA_PATH = join(dirname(__file__), "sample_data")


 def pytest_addoption(parser):

@@ -45,7 +45,7 @@ def pytest_runtest_setup(item):
     - Clear the django cache between runs
     """

-    if 'slow' in item.keywords and not item.config.getoption("--runslow"):
+    if "slow" in item.keywords and not item.config.getoption("--runslow"):
         pytest.skip("need --runslow option to run")

     from django.core.cache import cache

@@ -56,9 +56,9 @@ def pytest_runtest_setup(item):
 @pytest.fixture
 def setup_repository_data(django_db_setup, django_db_blocker):
     with django_db_blocker.unblock():
-        call_command('loaddata', join(SAMPLE_DATA_PATH, 'repository_group.json'))
+        call_command("loaddata", join(SAMPLE_DATA_PATH, "repository_group.json"))
     with django_db_blocker.unblock():
-        call_command('loaddata', join(SAMPLE_DATA_PATH, 'repository.json'))
+        call_command("loaddata", join(SAMPLE_DATA_PATH, "repository.json"))


 @pytest.fixture(scope="session", autouse=True)

@@ -70,14 +70,14 @@ def block_unmocked_requests():
     """

     def mocked_send(*args, **kwargs):
-        raise RuntimeError('Tests must mock all HTTP requests!')
+        raise RuntimeError("Tests must mock all HTTP requests!")

     # The standard monkeypatch fixture cannot be used with session scope:
     # https://github.com/pytest-dev/pytest/issues/363
     monkeypatch = MonkeyPatch()
     # Monkeypatching here since any higher level would break responses:
     # https://github.com/getsentry/responses/blob/0.5.1/responses.py#L295
-    monkeypatch.setattr('requests.adapters.HTTPAdapter.send', mocked_send)
+    monkeypatch.setattr("requests.adapters.HTTPAdapter.send", mocked_send)
     yield monkeypatch
     monkeypatch.undo()

@@ -90,7 +90,7 @@ def sample_data():
     return SampleData()


-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def test_base_dir():
     return os.path.dirname(__file__)

@@ -100,14 +100,14 @@ def sample_push(sample_data):
     return copy.deepcopy(sample_data.push_data)


-@pytest.fixture(name='create_push')
+@pytest.fixture(name="create_push")
 def fixture_create_push():
     """Return a function to create a push"""

     def create(
         repository,
-        revision='4c45a777949168d16c03a4cba167678b7ab65f76',
-        author='foo@bar.com',
+        revision="4c45a777949168d16c03a4cba167678b7ab65f76",
+        author="foo@bar.com",
         time=None,
         explicit_id=None,
     ):

@@ -122,11 +122,11 @@ def fixture_create_push():
     return create


-@pytest.fixture(name='create_commit')
+@pytest.fixture(name="create_commit")
 def fixture_create_commit():
     """Return a function to create a commit"""

-    def create(push, comments='Bug 12345 - This is a message'):
+    def create(push, comments="Bug 12345 - This is a message"):
         return th_models.Commit.objects.create(
             push=push, revision=push.revision, author=push.author, comments=comments
         )

@@ -134,7 +134,7 @@ def fixture_create_commit():
     return create


-@pytest.fixture(name='create_signature')
+@pytest.fixture(name="create_signature")
 def fixture_create_signature():
     """Returns a function to create a signature"""


@@ -147,7 +147,7 @@ def fixture_create_signature():
         test,
         test_perf_signature,
         repository,
-        application='',
+        application="",
     ):
         return perf_models.PerformanceSignature.objects.create(
             repository=repository,

@@ -167,7 +167,7 @@ def fixture_create_signature():
     return create


-@pytest.fixture(name='create_perf_datum')
+@pytest.fixture(name="create_perf_datum")
 def fixture_create_perf_datum():
     """Returns a function to create a performance datum"""


@@ -258,9 +258,9 @@ def test_issue_tracker(transactional_db):
 def test_repository_2(test_repository):
     return th_models.Repository.objects.create(
         repository_group=test_repository.repository_group,
-        name=test_repository.name + '_2',
+        name=test_repository.name + "_2",
         dvcs_type=test_repository.dvcs_type,
-        url=test_repository.url + '_2',
+        url=test_repository.url + "_2",
         codebase=test_repository.codebase,
     )

@@ -272,25 +272,25 @@ def test_push(create_push, test_repository):

 @pytest.fixture
 def test_perfcomp_push(create_push, test_repository):
-    return create_push(test_repository, '1377267c6dc1')
+    return create_push(test_repository, "1377267c6dc1")


 @pytest.fixture
 def test_perfcomp_push_2(create_push, test_repository):
-    return create_push(test_repository, '08038e535f58')
+    return create_push(test_repository, "08038e535f58")


 @pytest.fixture
 def test_linux_platform():
     return th_models.MachinePlatform.objects.create(
-        os_name='-', platform='linux1804-64-shippable-qr', architecture='-'
+        os_name="-", platform="linux1804-64-shippable-qr", architecture="-"
     )


 @pytest.fixture
 def test_macosx_platform():
     return th_models.MachinePlatform.objects.create(
-        os_name='', platform='macosx1015-64-shippable-qr', architecture=''
+        os_name="", platform="macosx1015-64-shippable-qr", architecture=""
     )

@@ -304,7 +304,7 @@ def test_commit(create_commit, test_push):
     return create_commit(test_push)


-@pytest.fixture(name='create_jobs')
+@pytest.fixture(name="create_jobs")
 def fixture_create_jobs(test_repository, failure_classifications):
     """Return a function to create jobs"""


@@ -318,8 +318,8 @@ def fixture_create_jobs(test_repository, failure_classifications):
 @pytest.fixture
 def test_job(eleven_job_blobs, create_jobs):
     job = eleven_job_blobs[0]
-    job['job'].update(
-        {'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs', 'taskcluster_retry_id': '0'}
+    job["job"].update(
+        {"taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs", "taskcluster_retry_id": "0"}
     )
     return create_jobs([job])[0]

@@ -327,20 +327,20 @@ def test_job(eleven_job_blobs, create_jobs):
 @pytest.fixture
 def test_two_jobs_tc_metadata(eleven_job_blobs_new_date, create_jobs):
     job_1, job_2 = eleven_job_blobs_new_date[0:2]
-    job_1['job'].update(
+    job_1["job"].update(
         {
-            'status': 'completed',
-            'result': 'testfailed',
-            'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
-            'taskcluster_retry_id': '0',
+            "status": "completed",
+            "result": "testfailed",
+            "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs",
+            "taskcluster_retry_id": "0",
         }
     )
-    job_2['job'].update(
+    job_2["job"].update(
         {
-            'status': 'completed',
-            'result': 'testfailed',
-            'taskcluster_task_id': 'FJtjczXfTAGClIl6wNBo9g',
-            'taskcluster_retry_id': '0',
+            "status": "completed",
+            "result": "testfailed",
+            "taskcluster_task_id": "FJtjczXfTAGClIl6wNBo9g",
+            "taskcluster_retry_id": "0",
         }
     )
     return create_jobs([job_1, job_2])

@@ -365,7 +365,7 @@ def mock_log_parser(monkeypatch):
     def task_mock(*args, **kwargs):
         pass

-    monkeypatch.setattr(tasks, 'parse_logs', task_mock)
+    monkeypatch.setattr(tasks, "parse_logs", task_mock)


 @pytest.fixture

@@ -376,20 +376,20 @@ def taskcluster_notify_mock(monkeypatch):
         nonlocal mock
         return mock

-    monkeypatch.setattr(taskcluster, 'notify_client_factory', mockreturn)
+    monkeypatch.setattr(taskcluster, "notify_client_factory", mockreturn)
     return mock


 @pytest.fixture
 def mock_tc_prod_backfill_credentials(monkeypatch):
-    monkeypatch.setattr(settings, 'PERF_SHERIFF_BOT_CLIENT_ID', "client_id")
-    monkeypatch.setattr(settings, 'PERF_SHERIFF_BOT_ACCESS_TOKEN', "access_token")
+    monkeypatch.setattr(settings, "PERF_SHERIFF_BOT_CLIENT_ID", "client_id")
+    monkeypatch.setattr(settings, "PERF_SHERIFF_BOT_ACCESS_TOKEN", "access_token")


 @pytest.fixture
 def mock_tc_prod_notify_credentials(monkeypatch):
-    monkeypatch.setattr(settings, 'NOTIFY_CLIENT_ID', "client_id")
-    monkeypatch.setattr(settings, 'NOTIFY_ACCESS_TOKEN', "access_token")
+    monkeypatch.setattr(settings, "NOTIFY_CLIENT_ID", "client_id")
+    monkeypatch.setattr(settings, "NOTIFY_ACCESS_TOKEN", "access_token")


 @pytest.fixture

@@ -423,12 +423,12 @@ def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser)
             push_index = 0

         # Modify job structure to sync with the push sample data
-        if 'sources' in blob:
-            del blob['sources']
+        if "sources" in blob:
+            del blob["sources"]

-        blob['revision'] = sample_push[push_index]['revision']
-        blob['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXL{}'.format(task_id_index)
-        blob['taskcluster_retry_id'] = '0'
+        blob["revision"] = sample_push[push_index]["revision"]
+        blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXL{}".format(task_id_index)
+        blob["taskcluster_retry_id"] = "0"
         blobs.append(blob)

         push_index += 1

@@ -441,7 +441,7 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
     # make unique revisions
     counter = 0
     for push in sample_push:
-        push['push_timestamp'] = int(time.time()) + counter
+        push["push_timestamp"] = int(time.time()) + counter
         counter += 1

     store_push_data(test_repository, sample_push)

@@ -459,16 +459,16 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
             push_index = 0

         # Modify job structure to sync with the push sample data
-        if 'sources' in blob:
-            del blob['sources']
+        if "sources" in blob:
+            del blob["sources"]

-        blob['revision'] = sample_push[push_index]['revision']
-        blob['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcX{:0>2}'.format(task_id_index)
-        blob['taskcluster_retry_id'] = '0'
-        blob['job']['revision'] = sample_push[push_index]['revision']
-        blob['job']['submit_timestamp'] = sample_push[push_index]['push_timestamp']
-        blob['job']['start_timestamp'] = sample_push[push_index]['push_timestamp'] + 10
-        blob['job']['end_timestamp'] = sample_push[push_index]['push_timestamp'] + 1000
+        blob["revision"] = sample_push[push_index]["revision"]
+        blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcX{:0>2}".format(task_id_index)
+        blob["taskcluster_retry_id"] = "0"
+        blob["job"]["revision"] = sample_push[push_index]["revision"]
+        blob["job"]["submit_timestamp"] = sample_push[push_index]["push_timestamp"]
+        blob["job"]["start_timestamp"] = sample_push[push_index]["push_timestamp"] + 10
+        blob["job"]["end_timestamp"] = sample_push[push_index]["push_timestamp"] + 1000
         blobs.append(blob)

         push_index += 1

@@ -552,7 +552,7 @@ def failure_lines(test_job):
 def failure_line_logs(test_job):
     return create_failure_lines(
         test_job,
-        [(test_line, {'action': 'log', 'test': None}), (test_line, {'subtest': 'subtest2'})],
+        [(test_line, {"action": "log", "test": None}), (test_line, {"subtest": "subtest2"})],
     )

@@ -611,7 +611,7 @@ def classified_failures(
 @pytest.fixture
 def test_user(db):
     # a user *without* sheriff/staff permissions
-    user = th_models.User.objects.create(username="testuser1", email='user@foo.com', is_staff=False)
+    user = th_models.User.objects.create(username="testuser1", email="user@foo.com", is_staff=False)
     return user

@@ -622,7 +622,7 @@ def test_ldap_user(db):
     and who does not have `is_staff` permissions.
     """
     user = th_models.User.objects.create(
-        username="mozilla-ldap/user@foo.com", email='user@foo.com', is_staff=False
+        username="mozilla-ldap/user@foo.com", email="user@foo.com", is_staff=False
     )
     return user

@@ -631,20 +631,20 @@ def test_ldap_user(db):
 def test_sheriff(db):
     # a user *with* sheriff/staff permissions
     user = th_models.User.objects.create(
-        username="testsheriff1", email='sheriff@foo.com', is_staff=True
+        username="testsheriff1", email="sheriff@foo.com", is_staff=True
     )
     return user


 @pytest.fixture
 def test_perf_framework(transactional_db):
-    return perf_models.PerformanceFramework.objects.create(name='test_talos', enabled=True)
+    return perf_models.PerformanceFramework.objects.create(name="test_talos", enabled=True)


 @pytest.fixture
 def test_perf_signature(test_repository, test_perf_framework) -> perf_models.PerformanceSignature:
     windows_7_platform = th_models.MachinePlatform.objects.create(
-        os_name='win', platform='win7', architecture='x86'
+        os_name="win", platform="win7", architecture="x86"
     )
     return create_perf_signature(test_perf_framework, test_repository, windows_7_platform)

@@ -652,24 +652,24 @@ def test_perf_signature(test_repository, test_perf_framework) -> perf_models.Per
 def create_perf_signature(
     perf_framework, repository, machine_platform: th_models.MachinePlatform
 ) -> perf_models.PerformanceSignature:
-    option = th_models.Option.objects.create(name='opt')
+    option = th_models.Option.objects.create(name="opt")
     option_collection = th_models.OptionCollection.objects.create(
-        option_collection_hash='my_option_hash', option=option
+        option_collection_hash="my_option_hash", option=option
     )

     return perf_models.PerformanceSignature.objects.create(
         repository=repository,
-        signature_hash=(40 * 't'),
+        signature_hash=(40 * "t"),
         framework=perf_framework,
         platform=machine_platform,
         option_collection=option_collection,
-        suite='mysuite',
-        test='mytest',
-        application='firefox',
+        suite="mysuite",
+        test="mytest",
+        application="firefox",
         has_subtests=False,
-        tags='warm pageload',
-        extra_options='e10s opt',
-        measurement_unit='ms',
+        tags="warm pageload",
+        extra_options="e10s opt",
+        measurement_unit="ms",
         last_updated=datetime.datetime.now(),
     )

@@ -687,16 +687,16 @@ def test_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
 def create_taskcluster_metadata(test_job_2) -> th_models.TaskclusterMetadata:
     return th_models.TaskclusterMetadata.objects.create(
         job=test_job_2,
-        task_id='V3SVuxO8TFy37En_6HcXLp',
-        retry_id='0',
+        task_id="V3SVuxO8TFy37En_6HcXLp",
+        retry_id="0",
     )


 def create_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
     return th_models.TaskclusterMetadata.objects.create(
         job=test_job_3,
-        task_id='V3SVuxO8TFy37En_6HcXLq',
-        retry_id='0',
+        task_id="V3SVuxO8TFy37En_6HcXLq",
+        retry_id="0",
     )

@@ -704,12 +704,12 @@ def create_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
 def test_perf_signature_2(test_perf_signature):
     return perf_models.PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
-        signature_hash=(20 * 't2'),
+        signature_hash=(20 * "t2"),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
-        suite='mysuite2',
-        test='mytest2',
+        suite="mysuite2",
+        test="mytest2",
         has_subtests=test_perf_signature.has_subtests,
         extra_options=test_perf_signature.extra_options,
         last_updated=datetime.datetime.now(),

@@ -721,12 +721,12 @@ def test_stalled_data_signature(test_perf_signature):
     stalled_data_timestamp = datetime.datetime.now() - datetime.timedelta(days=120)
     return perf_models.PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
-        signature_hash=(20 * 't3'),
+        signature_hash=(20 * "t3"),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
-        suite='mysuite3',
-        test='mytest3',
+        suite="mysuite3",
+        test="mytest3",
         has_subtests=test_perf_signature.has_subtests,
         extra_options=test_perf_signature.extra_options,
         last_updated=stalled_data_timestamp,

@@ -738,7 +738,7 @@ def test_perf_data(test_perf_signature, eleven_jobs_stored):
     # for making things easier, ids for jobs
     # and push should be the same;
     # also, we only need a subset of jobs
-    perf_jobs = th_models.Job.objects.filter(pk__in=range(7, 11)).order_by('id').all()
+    perf_jobs = th_models.Job.objects.filter(pk__in=range(7, 11)).order_by("id").all()

     for index, job in enumerate(perf_jobs, start=1):
         job.push_id = index

@@ -755,7 +755,7 @@ def test_perf_data(test_perf_signature, eleven_jobs_stored):
         perf_datum.push.time = job.push.time
         perf_datum.push.save()

-    return perf_models.PerformanceDatum.objects.order_by('id').all()
+    return perf_models.PerformanceDatum.objects.order_by("id").all()


 @pytest.fixture

@@ -767,14 +767,14 @@ def mock_bugzilla_api_request(monkeypatch):
         bug_list_path = os.path.join(tests_folder, "sample_data", "bug_list.json")
         with open(bug_list_path) as f:
             last_change_time = (datetime.datetime.utcnow() - datetime.timedelta(days=30)).strftime(
-                '%Y-%m-%dT%H:%M:%SZ'
+                "%Y-%m-%dT%H:%M:%SZ"
             )
             data = json.load(f)
             for bug in data["bugs"]:
                 bug["last_change_time"] = last_change_time
             return data

-    monkeypatch.setattr(treeherder.etl.bugzilla, 'fetch_json', _fetch_json)
+    monkeypatch.setattr(treeherder.etl.bugzilla, "fetch_json", _fetch_json)


 @pytest.fixture

@@ -787,7 +787,7 @@ def mock_deviance(monkeypatch):
     def _deviance(*args, **kwargs):
         return "OK", 0

-    monkeypatch.setattr(moz_measure_noise, 'deviance', _deviance)
+    monkeypatch.setattr(moz_measure_noise, "deviance", _deviance)


 @pytest.fixture

@@ -797,7 +797,7 @@ def bugs(mock_bugzilla_api_request):
     process = BzApiBugProcess()
     process.run()

-    return th_models.Bugscache.objects.all().order_by('id')
+    return th_models.Bugscache.objects.all().order_by("id")


 @pytest.fixture

@@ -807,11 +807,11 @@ def mock_bugzilla_reopen_request(monkeypatch, request):
     def _reopen_request(url, method, headers, json):
         import json as json_module

-        reopened_bugs = request.config.cache.get('reopened_bugs', {})
+        reopened_bugs = request.config.cache.get("reopened_bugs", {})
         reopened_bugs[url] = json_module.dumps(json)
-        request.config.cache.set('reopened_bugs', reopened_bugs)
+        request.config.cache.set("reopened_bugs", reopened_bugs)

-    monkeypatch.setattr(treeherder.etl.bugzilla, 'reopen_request', _reopen_request)
+    monkeypatch.setattr(treeherder.etl.bugzilla, "reopen_request", _reopen_request)


 @pytest.fixture

@@ -839,7 +839,7 @@ def mock_file_bugzilla_map_request(monkeypatch):

     def _fetch_data(self, project):
         url = (
-            'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json'
+            "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json"
             % project
         )
         files_bugzilla_data = None

@@ -859,7 +859,7 @@ def mock_file_bugzilla_map_request(monkeypatch):
         }

     monkeypatch.setattr(
-        treeherder.etl.files_bugzilla_map.FilesBugzillaMapProcess, 'fetch_data', _fetch_data
+        treeherder.etl.files_bugzilla_map.FilesBugzillaMapProcess, "fetch_data", _fetch_data
     )


@@ -879,11 +879,11 @@ def mock_bugscache_bugzilla_request(monkeypatch):
         for bug in bugzilla_data["bugs"]:
             bug["last_change_time"] = (
                 datetime.datetime.now() - datetime.timedelta(20)
-            ).isoformat(timespec='seconds') + 'Z'
+            ).isoformat(timespec="seconds") + "Z"
         return bugzilla_data["bugs"]

     monkeypatch.setattr(
-        treeherder.etl.bugzilla, 'fetch_intermittent_bugs', _fetch_intermittent_bugs
+        treeherder.etl.bugzilla, "fetch_intermittent_bugs", _fetch_intermittent_bugs
     )


@@ -909,7 +909,7 @@ def mock_get_artifact_list(monkeypatch):
     def _mock_get(url, params=None):
         return MockResponse()

-    monkeypatch.setattr(treeherder.webapp.api.utils, 'fetch_json', _mock_get)
+    monkeypatch.setattr(treeherder.webapp.api.utils, "fetch_json", _mock_get)


 @pytest.fixture

@@ -919,7 +919,7 @@ def mock_cache(monkeypatch):
     def mockreturn_cache(*args, **kwargs):
         return {"task_id": "some_id", "retry_id": 0}

-    monkeypatch.setattr(django.core.cache.cache, 'get', mockreturn_cache)
+    monkeypatch.setattr(django.core.cache.cache, "get", mockreturn_cache)


 @pytest.fixture

@@ -935,17 +935,17 @@ def text_log_error_lines(test_job, failure_lines):

 @pytest.fixture
 def test_perf_tag():
-    return perf_models.PerformanceTag.objects.create(name='first_tag')
+    return perf_models.PerformanceTag.objects.create(name="first_tag")


 @pytest.fixture
 def test_perf_tag_2():
-    return perf_models.PerformanceTag.objects.create(name='second_tag')
+    return perf_models.PerformanceTag.objects.create(name="second_tag")


 @pytest.fixture
 def test_perf_alert_summary(test_repository, push_stored, test_perf_framework, test_issue_tracker):
-    test_perf_tag = perf_models.PerformanceTag.objects.create(name='harness')
+    test_perf_tag = perf_models.PerformanceTag.objects.create(name="harness")

     performance_alert_summary = perf_models.PerformanceAlertSummary.objects.create(
         repository=test_repository,

@@ -1074,9 +1074,9 @@ def generic_reference_data(test_repository):

     r = RefdataHolder()

-    r.option = th_models.Option.objects.create(name='my_option')
+    r.option = th_models.Option.objects.create(name="my_option")
     r.option_collection = th_models.OptionCollection.objects.create(
-        option_collection_hash='my_option_hash', option=r.option
+        option_collection_hash="my_option_hash", option=r.option
     )
     r.option_collection_hash = r.option_collection.option_collection_hash
     r.machine_platform = th_models.MachinePlatform.objects.create(

@@ -1085,13 +1085,13 @@ def generic_reference_data(test_repository):
     r.build_platform = th_models.BuildPlatform.objects.create(
         os_name="my_os", platform="my_platform", architecture="x86"
     )
-    r.machine = th_models.Machine.objects.create(name='mymachine')
-    r.job_group = th_models.JobGroup.objects.create(symbol='S', name='myjobgroup')
-    r.job_type = th_models.JobType.objects.create(symbol='j', name='myjob')
-    r.product = th_models.Product.objects.create(name='myproduct')
+    r.machine = th_models.Machine.objects.create(name="mymachine")
+    r.job_group = th_models.JobGroup.objects.create(symbol="S", name="myjobgroup")
+    r.job_type = th_models.JobType.objects.create(symbol="j", name="myjob")
+    r.product = th_models.Product.objects.create(name="myproduct")
     r.signature = th_models.ReferenceDataSignatures.objects.create(
-        name='myreferencedatasignaeture',
-        signature='1234',
+        name="myreferencedatasignaeture",
+        signature="1234",
         build_os_name=r.build_platform.os_name,
         build_platform=r.build_platform.platform,
         build_architecture=r.build_platform.architecture,

@@ -1103,7 +1103,7 @@ def generic_reference_data(test_repository):
         job_type_name=r.job_type.name,
         job_type_symbol=r.job_type.symbol,
         option_collection_hash=r.option_collection_hash,
-        build_system_type='buildbot',
+        build_system_type="buildbot",
         repository=test_repository.name,
         first_submission_timestamp=0,
     )

@@ -1113,37 +1113,37 @@ def generic_reference_data(test_repository):

 @pytest.fixture
 def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
-    jobs = th_models.Job.objects.all().order_by('id')
+    jobs = th_models.Job.objects.all().order_by("id")
     bug_id = bugs[0].id
     job_id = jobs[0].id
     th_models.BugJobMap.create(job_id=job_id, bug_id=bug_id)
-    query_string = '?startday=2012-05-09&endday=2018-05-10&tree={}'.format(test_repository.name)
+    query_string = "?startday=2012-05-09&endday=2018-05-10&tree={}".format(test_repository.name)

     return {
-        'tree': test_repository.name,
-        'option': th_models.Option.objects.first(),
-        'bug_id': bug_id,
-        'job': jobs[0],
-        'jobs': jobs,
-        'query_string': query_string,
+        "tree": test_repository.name,
+        "option": th_models.Option.objects.first(),
+        "bug_id": bug_id,
+        "job": jobs[0],
+        "jobs": jobs,
+        "query_string": query_string,
     }


 @pytest.fixture
 def test_run_data(bug_data):
     pushes = th_models.Push.objects.all()
-    time = pushes[0].time.strftime('%Y-%m-%d')
+    time = pushes[0].time.strftime("%Y-%m-%d")
     test_runs = 0
     for push in list(pushes):
-        if push.time.strftime('%Y-%m-%d') == time:
+        if push.time.strftime("%Y-%m-%d") == time:
             test_runs += 1

-    return {'test_runs': test_runs, 'push_time': time}
+    return {"test_runs": test_runs, "push_time": time}


 @pytest.fixture
 def group_data(transactional_db, eleven_job_blobs, create_jobs):
-    query_string = '?manifest=/test&date=2022-10-01'
+    query_string = "?manifest=/test&date=2022-10-01"

     jt = []
     jt.append(

@@ -1159,11 +1159,11 @@ def group_data(transactional_db, eleven_job_blobs, create_jobs):
     g1 = th_models.Group.objects.create(name="/test")
     for i in range(3):
         job = eleven_job_blobs[i]
-        job['job'].update(
+        job["job"].update(
             {
-                'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXL%s' % i,
-                'taskcluster_retry_id': '0',
-                'name': jt[i].name,
+                "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXL%s" % i,
+                "taskcluster_retry_id": "0",
+                "name": jt[i].name,
             }
         )
         j = create_jobs([job])[0]

@@ -1174,17 +1174,17 @@ def group_data(transactional_db, eleven_job_blobs, create_jobs):
         th_models.GroupStatus.objects.create(status=1, duration=1, job_log=job_log, group=g1)

     return {
-        'date': j.submit_time,
-        'manifest': '/test',
-        'query_string': query_string,
-        'expected': {
-            'job_type_names': [
-                'test-windows10-64-2004-qr/opt-mochitest-plain',
-                'test-windows10-64-2004-qr/opt-mochitest-plain-swr',
+        "date": j.submit_time,
+        "manifest": "/test",
+        "query_string": query_string,
+        "expected": {
+            "job_type_names": [
+                "test-windows10-64-2004-qr/opt-mochitest-plain",
+                "test-windows10-64-2004-qr/opt-mochitest-plain-swr",
             ],
-            'manifests': [
+            "manifests": [
                 {
-                    '/test': [[0, "passed", 1, 2], [1, "passed", 1, 1]],
+                    "/test": [[0, "passed", 1, 2], [1, "passed", 1, 1]],
                 }
             ],
         },

@@ -1210,10 +1210,10 @@ def generate_enough_perf_datum(test_repository, test_perf_signature):

 @pytest.fixture
 def sample_option_collections(transactional_db):
-    option1 = th_models.Option.objects.create(name='opt1')
-    option2 = th_models.Option.objects.create(name='opt2')
-    th_models.OptionCollection.objects.create(option_collection_hash='option_hash1', option=option1)
-    th_models.OptionCollection.objects.create(option_collection_hash='option_hash2', option=option2)
+    option1 = th_models.Option.objects.create(name="opt1")
+    option2 = th_models.Option.objects.create(name="opt2")
+    th_models.OptionCollection.objects.create(option_collection_hash="option_hash1", option=option1)
+    th_models.OptionCollection.objects.create(option_collection_hash="option_hash2", option=option2)


 @pytest.fixture

@@ -1270,7 +1270,7 @@ class JSONFixtureLoader:

     def __call__(self, fixture_filename):
         fixture_path = join(*self._prior_dirs, fixture_filename)
-        with open(fixture_path, 'r') as f:
+        with open(fixture_path, "r") as f:
            return json.load(f)

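Note on the conftest hunks above: the block_unmocked_requests fixture builds its own MonkeyPatch object because the standard monkeypatch fixture is function-scoped and cannot back a session-scoped fixture. A condensed, self-contained sketch of that pattern (same logic as the diff, trimmed for illustration; assumes pytest and requests are installed):

import pytest
from _pytest.monkeypatch import MonkeyPatch


@pytest.fixture(scope="session", autouse=True)
def block_unmocked_requests():
    def mocked_send(*args, **kwargs):
        raise RuntimeError("Tests must mock all HTTP requests!")

    # A manually managed MonkeyPatch instance, undone at session teardown:
    monkeypatch = MonkeyPatch()
    monkeypatch.setattr("requests.adapters.HTTPAdapter.send", mocked_send)
    yield monkeypatch
    monkeypatch.undo()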
@@ -35,7 +35,7 @@ def pending_jobs_stored(test_repository, failure_classifications, pending_job, p
     stores a list of buildapi pending jobs into the jobs store
     """
     pending_job.update(push_stored[0])
-    pending_job.update({'project': test_repository.name})
+    pending_job.update({"project": test_repository.name})
     store_job_data(test_repository, [pending_job])


@@ -45,7 +45,7 @@ def running_jobs_stored(test_repository, failure_classifications, running_job, p
     stores a list of buildapi running jobs
     """
     running_job.update(push_stored[0])
-    running_job.update({'project': test_repository.name})
+    running_job.update({"project": test_repository.name})
     store_job_data(test_repository, [running_job])


@@ -54,6 +54,6 @@ def completed_jobs_stored(test_repository, failure_classifications, completed_jo
     """
     stores a list of buildapi completed jobs
     """
-    completed_job['revision'] = push_stored[0]['revision']
-    completed_job.update({'project': test_repository.name})
+    completed_job["revision"] = push_stored[0]["revision"]
+    completed_job.update({"project": test_repository.name})
     store_job_data(test_repository, [completed_job])
@@ -24,23 +24,23 @@ def test_store_job_with_unparsed_log(

     # create a wrapper around get_error_summary that records whether
     # it's been called
-    mock_get_error_summary = MagicMock(name='get_error_summary', wraps=get_error_summary)
+    mock_get_error_summary = MagicMock(name="get_error_summary", wraps=get_error_summary)
     import treeherder.model.error_summary

-    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary', mock_get_error_summary)
+    monkeypatch.setattr(treeherder.model.error_summary, "get_error_summary", mock_get_error_summary)
     log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
     errorsummary = add_log_response("mochitest-browser-chrome_errorsummary.log")

-    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+    job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
     job_data = {
-        'project': test_repository.name,
-        'revision': push_stored[0]['revision'],
-        'job': {
-            'job_guid': job_guid,
-            'state': 'completed',
-            'log_references': [
-                {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'},
-                {'url': errorsummary, 'name': 'mochi_errorsummary.log', 'parse_status': 'pending'},
+        "project": test_repository.name,
+        "revision": push_stored[0]["revision"],
+        "job": {
+            "job_guid": job_guid,
+            "state": "completed",
+            "log_references": [
+                {"url": log_url, "name": "live_backing_log", "parse_status": "pending"},
+                {"url": errorsummary, "name": "mochi_errorsummary.log", "parse_status": "pending"},
             ],
         },
     }

@@ -58,13 +58,13 @@ def test_store_job_pending_to_completed_with_unparsed_log(
 def test_store_job_pending_to_completed_with_unparsed_log(
     test_repository, push_stored, failure_classifications, activate_responses
 ):
-    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+    job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"

     # the first time, submit it as running (with no logs)
     job_data = {
-        'project': test_repository.name,
-        'revision': push_stored[0]['revision'],
-        'job': {'job_guid': job_guid, 'state': 'running'},
+        "project": test_repository.name,
+        "revision": push_stored[0]["revision"],
+        "job": {"job_guid": job_guid, "state": "running"},
     }
     store_job_data(test_repository, [job_data])
     # should have no text log errors or bug suggestions

@@ -74,13 +74,13 @@ def test_store_job_pending_to_completed_with_unparsed_log(
     # the second time, post a log that will get parsed
     log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
     job_data = {
-        'project': test_repository.name,
-        'revision': push_stored[0]['revision'],
-        'job': {
-            'job_guid': job_guid,
-            'state': 'completed',
-            'log_references': [
-                {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'}
+        "project": test_repository.name,
+        "revision": push_stored[0]["revision"],
+        "job": {
+            "job_guid": job_guid,
+            "state": "completed",
+            "log_references": [
+                {"url": log_url, "name": "live_backing_log", "parse_status": "pending"}
             ],
         },
     }

@@ -93,11 +93,11 @@ def test_store_job_pending_to_completed_with_unparsed_log(

 def test_store_job_with_tier(test_repository, failure_classifications, push_stored):
     """test submitting a job with tier specified"""
-    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+    job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
     job_data = {
-        'project': test_repository.name,
-        'revision': push_stored[0]['revision'],
-        'job': {'job_guid': job_guid, 'state': 'completed', 'tier': 3},
+        "project": test_repository.name,
+        "revision": push_stored[0]["revision"],
+        "job": {"job_guid": job_guid, "state": "completed", "tier": 3},
     }

     store_job_data(test_repository, [job_data])

@@ -108,11 +108,11 @@ def test_store_job_with_tier(test_repository, failure_classifications, push_stor

 def test_store_job_with_default_tier(test_repository, failure_classifications, push_stored):
     """test submitting a job with no tier specified gets default"""
-    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+    job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
     job_data = {
-        'project': test_repository.name,
-        'revision': push_stored[0]['revision'],
-        'job': {'job_guid': job_guid, 'state': 'completed'},
+        "project": test_repository.name,
+        "revision": push_stored[0]["revision"],
+        "job": {"job_guid": job_guid, "state": "completed"},
     }

     store_job_data(test_repository, [job_data])
@@ -6,9 +6,9 @@ def test_pending_job_available(test_repository, pending_jobs_stored, client):
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
+    assert len(jobs["results"]) == 1

-    assert jobs['results'][0]['state'] == 'pending'
+    assert jobs["results"][0]["state"] == "pending"


 def test_running_job_available(test_repository, running_jobs_stored, client):

@@ -16,9 +16,9 @@ def test_running_job_available(test_repository, running_jobs_stored, client):
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
+    assert len(jobs["results"]) == 1

-    assert jobs['results'][0]['state'] == 'running'
+    assert jobs["results"][0]["state"] == "running"


 def test_completed_job_available(test_repository, completed_jobs_stored, client):

@@ -26,8 +26,8 @@ def test_completed_job_available(test_repository, completed_jobs_stored, client)
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
-    assert jobs['results'][0]['state'] == 'completed'
+    assert len(jobs["results"]) == 1
+    assert jobs["results"][0]["state"] == "completed"


 def test_pending_stored_to_running_loaded(

@@ -42,8 +42,8 @@ def test_pending_stored_to_running_loaded(
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
-    assert jobs['results'][0]['state'] == 'running'
+    assert len(jobs["results"]) == 1
+    assert jobs["results"][0]["state"] == "running"


 def test_finished_job_to_running(

@@ -56,8 +56,8 @@ def test_finished_job_to_running(
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
-    assert jobs['results'][0]['state'] == 'completed'
+    assert len(jobs["results"]) == 1
+    assert jobs["results"][0]["state"] == "completed"


 def test_running_job_to_pending(test_repository, running_jobs_stored, pending_jobs_stored, client):

@@ -69,5 +69,5 @@ def test_running_job_to_pending(test_repository, running_jobs_stored, pending_jo
     assert resp.status_code == 200
     jobs = resp.json()

-    assert len(jobs['results']) == 1
-    assert jobs['results'][0]['state'] == 'running'
+    assert len(jobs["results"]) == 1
+    assert jobs["results"][0]["state"] == "running"
@@ -10,8 +10,8 @@ from treeherder.model.models import Push
 def perf_push(test_repository):
     return Push.objects.create(
         repository=test_repository,
-        revision='1234abcd',
-        author='foo@bar.com',
+        revision="1234abcd",
+        author="foo@bar.com",
         time=datetime.datetime.now(),
     )

@@ -19,5 +19,5 @@ def perf_push(test_repository):
 @pytest.fixture
 def perf_job(perf_push, failure_classifications, generic_reference_data):
     return create_generic_job(
-        'myfunguid', perf_push.repository, perf_push.id, generic_reference_data
+        "myfunguid", perf_push.repository, perf_push.id, generic_reference_data
     )
@@ -34,7 +34,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
         incomplete_bugs[0],
         incomplete_bugs[2],
     ]:
-        submit_obj = {u"job_id": test_job.id, u"bug_id": bug.id, u"type": u"manual"}
+        submit_obj = {"job_id": test_job.id, "bug_id": bug.id, "type": "manual"}

         client.post(
             reverse("bug-job-map-list", kwargs={"project": test_job.repository.name}),

@@ -44,12 +44,12 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
     process = BzApiBugProcess()
     process.run()

-    reopened_bugs = request.config.cache.get('reopened_bugs', None)
+    reopened_bugs = request.config.cache.get("reopened_bugs", None)

     import json

     EXPECTED_REOPEN_ATTEMPTS = {
-        'https://thisisnotbugzilla.org/rest/bug/202': json.dumps(
+        "https://thisisnotbugzilla.org/rest/bug/202": json.dumps(
             {
                 "status": "REOPENED",
                 "comment": {

@@ -58,7 +58,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
                 "comment_tags": "treeherder",
             }
         ),
-        'https://thisisnotbugzilla.org/rest/bug/404': json.dumps(
+        "https://thisisnotbugzilla.org/rest/bug/404": json.dumps(
             {
                 "status": "REOPENED",
                 "comment": {
@@ -21,80 +21,80 @@ from treeherder.model.models import (
 )

 DEFAULT_GTD_CONFIG = {
-    'json': {
-        'routes': ['index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA']
+    "json": {
+        "routes": ["index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA"]
     },
-    'content_type': 'application/json',
-    'status': 200,
+    "content_type": "application/json",
+    "status": 200,
 }
 DEFAULT_DA_CONFIG = {
-    'json': {
-        'push': {
-            'id': 'autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4',
-            'classification': 'GOOD',
+    "json": {
+        "push": {
+            "id": "autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4",
+            "classification": "GOOD",
         },
-        'failures': {
-            'real': {},
-            'intermittent': {
-                'testing/web-platform/tests/webdriver/tests/element_click': [],
-                'devtools/client/framework/test/browser.ini': [
+        "failures": {
+            "real": {},
+            "intermittent": {
+                "testing/web-platform/tests/webdriver/tests/element_click": [],
+                "devtools/client/framework/test/browser.ini": [
                     {
-                        'task_id': 'V3SVuxO8TFy37En_6HcXLs',
-                        'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-1',
+                        "task_id": "V3SVuxO8TFy37En_6HcXLs",
+                        "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-1",
                         # autoclassify is True, there is a cached bug test1.js => autoclassification with one associated bug
-                        'autoclassify': True,
-                        'tests': ['devtools/client/framework/test/test1.js'],
+                        "autoclassify": True,
+                        "tests": ["devtools/client/framework/test/test1.js"],
                     },
                     {
-                        'task_id': 'FJtjczXfTAGClIl6wNBo9g',
-                        'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-2',
+                        "task_id": "FJtjczXfTAGClIl6wNBo9g",
+                        "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-2",
                         # autoclassify is True, there are two cached bugs test1.js and test2.js => autoclassification with two associated bugs
-                        'autoclassify': True,
-                        'tests': [
-                            'devtools/client/framework/test/test1.js',
-                            'devtools/client/framework/test/test2.js',
+                        "autoclassify": True,
+                        "tests": [
+                            "devtools/client/framework/test/test1.js",
+                            "devtools/client/framework/test/test2.js",
                         ],
                     },
                 ],
-                'devtools/client/framework/test2/browser.ini': [
+                "devtools/client/framework/test2/browser.ini": [
                     {
-                        'task_id': 'RutlNkofzrbTnbauRSTJWc',
-                        'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-3',
+                        "task_id": "RutlNkofzrbTnbauRSTJWc",
+                        "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-3",
                         # autoclassify is False, there is a cached bug for test1.js => no autoclassification
-                        'autoclassify': False,
-                        'tests': ['devtools/client/framework/test/test1.js'],
+                        "autoclassify": False,
+                        "tests": ["devtools/client/framework/test/test1.js"],
                     },
                     {
-                        'task_id': 'HTZJyyQLalgtOkbwDBxChF',
-                        'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-4',
+                        "task_id": "HTZJyyQLalgtOkbwDBxChF",
+                        "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-4",
                         # Even if autoclassify is True, there is no cached bug for test3.js => no autoclassification
-                        'autoclassify': True,
-                        'tests': ['devtools/client/framework/test/test3.js'],
+                        "autoclassify": True,
+                        "tests": ["devtools/client/framework/test/test3.js"],
                     },
                 ],
             },
-            'unknown': {},
+            "unknown": {},
         },
     },
-    'content_type': 'application/json',
-    'status': 200,
+    "content_type": "application/json",
+    "status": 200,
 }


 @pytest.fixture
 def autoland_repository():
-    group = RepositoryGroup.objects.create(name='development')
+    group = RepositoryGroup.objects.create(name="development")

     return Repository.objects.create(
-        dvcs_type='hg',
-        name='autoland',
-        url='https://hg.mozilla.org/integration/autoland',
-        active_status='active',
-        codebase='gecko',
+        dvcs_type="hg",
+        name="autoland",
+        url="https://hg.mozilla.org/integration/autoland",
+        active_status="active",
+        codebase="gecko",
         repository_group=group,
         performance_alerts_enabled=True,
         expire_performance_data=False,
-        tc_root_url='https://firefox-ci-tc.services.mozilla.com',
+        tc_root_url="https://firefox-ci-tc.services.mozilla.com",
     )

@@ -102,8 +102,8 @@ def autoland_repository():
 def autoland_push(autoland_repository):
     return Push.objects.create(
         repository=autoland_repository,
-        revision='A35mWTRuQmyj88yMnIF0fA',
-        author='foo@bar.com',
+        revision="A35mWTRuQmyj88yMnIF0fA",
+        author="foo@bar.com",
         time=datetime.datetime.now(),
     )

@@ -114,39 +114,39 @@ def populate_bugscache():
         [
             Bugscache(
                 id=1234567,
-                status='NEW',
-                summary='intermittent devtools/client/framework/test/test1.js | single tracking bug',
-                modified='2014-01-01 00:00:00',
+                status="NEW",
+                summary="intermittent devtools/client/framework/test/test1.js | single tracking bug",
+                modified="2014-01-01 00:00:00",
             ),
             Bugscache(
                 id=2345678,
-                status='NEW',
-                summary='intermittent devtools/client/framework/test/test2.js | single tracking bug',
-                modified='2014-01-01 00:00:00',
+                status="NEW",
+                summary="intermittent devtools/client/framework/test/test2.js | single tracking bug",
+                modified="2014-01-01 00:00:00",
             ),
         ]
     )


 @pytest.mark.parametrize(
-    'mode, route',
+    "mode, route",
     [
-        ('production', 'completely bad route'),
-        ('production', 'index.project.mozci.classification..revision.A35mWTRuQmyj88yMnIF0fA'),
-        ('production', 'index.project.mozci.classification.autoland.revision.'),
+        ("production", "completely bad route"),
+        ("production", "index.project.mozci.classification..revision.A35mWTRuQmyj88yMnIF0fA"),
+        ("production", "index.project.mozci.classification.autoland.revision."),
         (
-            'production',
-            'index.project.mozci.classification.autoland.revision.-35mW@RuQ__j88yénIF0f-',
+            "production",
+            "index.project.mozci.classification.autoland.revision.-35mW@RuQ__j88yénIF0f-",
         ),
         (
-            'production',
-            'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "production",
+            "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
-        ('testing', 'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA'),
+        ("testing", "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA"),
     ],
 )
 def test_get_push_wrong_route(mode, route, monkeypatch):
-    monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+    monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)

     with pytest.raises(AttributeError):
         ClassificationLoader().get_push(route)

@@ -154,66 +154,66 @@ def test_get_push_wrong_route(mode, route, monkeypatch):

 @pytest.mark.django_db
 @pytest.mark.parametrize(
-    'mode, route',
+    "mode, route",
     [
         (
-            'production',
-            'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "production",
+            "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
         (
-            'testing',
-            'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "testing",
+            "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
     ],
 )
 def test_get_push_unsupported_project(mode, route, monkeypatch):
-    monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+    monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)

     with pytest.raises(Repository.DoesNotExist) as e:
         ClassificationLoader().get_push(route)

-    assert str(e.value) == 'Repository matching query does not exist.'
+    assert str(e.value) == "Repository matching query does not exist."


 @pytest.mark.django_db
 @pytest.mark.parametrize(
-    'mode, route',
+    "mode, route",
     [
         (
-            'production',
-            'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "production",
+            "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
         (
-            'testing',
-            'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "testing",
+            "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
     ],
 )
 def test_get_push_unsupported_revision(mode, route, autoland_repository, monkeypatch):
-    monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+    monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)

     with pytest.raises(Push.DoesNotExist) as e:
         ClassificationLoader().get_push(route)

-    assert str(e.value) == 'Push matching query does not exist.'
+    assert str(e.value) == "Push matching query does not exist."


 @pytest.mark.django_db
 @pytest.mark.parametrize(
-    'mode, route',
+    "mode, route",
     [
         (
-            'production',
-            'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "production",
+            "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
         (
-            'testing',
-            'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+            "testing",
+            "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
         ),
     ],
 )
 def test_get_push(mode, route, autoland_push, monkeypatch):
-    monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+    monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)

     assert ClassificationLoader().get_push(route) == autoland_push

@@ -226,51 +226,51 @@ def update_dict(dict, update):
 @responses.activate
 @pytest.mark.django_db
 @pytest.mark.parametrize(
-    'error_type, error_message, get_task_definition_config, get_push_error, download_artifact_config',
+    "error_type, error_message, get_task_definition_config, get_push_error, download_artifact_config",
     [
-        [HTTPError, '', {'status': 500}, None, DEFAULT_DA_CONFIG],
+        [HTTPError, "", {"status": 500}, None, DEFAULT_DA_CONFIG],
         [
             AssertionError,
-            'A route containing the push project and revision is needed to save the mozci classification',
-            update_dict({**DEFAULT_GTD_CONFIG}, {'json': {}}),
+            "A route containing the push project and revision is needed to save the mozci classification",
+            update_dict({**DEFAULT_GTD_CONFIG}, {"json": {}}),
             None,
             DEFAULT_DA_CONFIG,
         ],
         [
             AssertionError,
-            'A route containing the push project and revision is needed to save the mozci classification',
-            update_dict({**DEFAULT_GTD_CONFIG}, {'json': {'routes': []}}),
+            "A route containing the push project and revision is needed to save the mozci classification",
+            update_dict({**DEFAULT_GTD_CONFIG}, {"json": {"routes": []}}),
             None,
             DEFAULT_DA_CONFIG,
         ],
         [
             AttributeError,
             None,
-            update_dict({**DEFAULT_GTD_CONFIG}, {'json': {'routes': ['bad route']}}),
+            update_dict({**DEFAULT_GTD_CONFIG}, {"json": {"routes": ["bad route"]}}),
             None,
             DEFAULT_DA_CONFIG,
         ],
         [None, None, DEFAULT_GTD_CONFIG, Repository.DoesNotExist, DEFAULT_DA_CONFIG],
         [
             Push.DoesNotExist,
-            'Push matching query does not exist.',
+            "Push matching query does not exist.",
             DEFAULT_GTD_CONFIG,
             Push.DoesNotExist,
             DEFAULT_DA_CONFIG,
         ],
-        [HTTPError, '', DEFAULT_GTD_CONFIG, None, {'status': 500}],
+        [HTTPError, "", DEFAULT_GTD_CONFIG, None, {"status": 500}],
        [
             AssertionError,
-            'Classification result should be a value in BAD, GOOD, UNKNOWN',
+            "Classification result should be a value in BAD, GOOD, UNKNOWN",
             DEFAULT_GTD_CONFIG,
             None,
             update_dict(
                 {**DEFAULT_DA_CONFIG},
                 {
-                    'json': {
-                        'push': {
-                            'id': 'autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4',
-                            'classification': 'WRONG',
+                    "json": {
+                        "push": {
+                            "id": "autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4",
+                            "classification": "WRONG",
                         }
                     }
                 },

@@ -287,17 +287,17 @@ def test_process_handle_errors(
     get_push_error,
     download_artifact_config,
 ):
-    root_url = 'https://community-tc.services.mozilla.com'
-    task_id = 'A35mWTRuQmyj88yMnIF0fA'
root_url = "https://community-tc.services.mozilla.com"
|
||||
task_id = "A35mWTRuQmyj88yMnIF0fA"
|
||||
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'{root_url}/api/queue/v1/task/{task_id}',
|
||||
f"{root_url}/api/queue/v1/task/{task_id}",
|
||||
**get_task_definition_config,
|
||||
)
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
|
||||
f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
|
||||
**download_artifact_config,
|
||||
)
|
||||
|
||||
|
@ -306,17 +306,17 @@ def test_process_handle_errors(
|
|||
def mock_get_push(x, y):
|
||||
raise get_push_error(error_message)
|
||||
|
||||
monkeypatch.setattr(ClassificationLoader, 'get_push', mock_get_push)
|
||||
monkeypatch.setattr(ClassificationLoader, "get_push", mock_get_push)
|
||||
|
||||
assert MozciClassification.objects.count() == 0
|
||||
|
||||
if error_type:
|
||||
with pytest.raises(error_type) as e:
|
||||
ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
|
||||
ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
|
||||
if error_message:
|
||||
assert str(e.value) == error_message
|
||||
else:
|
||||
ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
|
||||
ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
|
||||
|
||||
assert MozciClassification.objects.count() == 0
|
||||
|
||||
|
@ -324,28 +324,28 @@ def test_process_handle_errors(
|
|||
@responses.activate
|
||||
@pytest.mark.django_db
|
||||
def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_metadata):
|
||||
root_url = 'https://community-tc.services.mozilla.com'
|
||||
task_id = 'A35mWTRuQmyj88yMnIF0fA'
|
||||
root_url = "https://community-tc.services.mozilla.com"
|
||||
task_id = "A35mWTRuQmyj88yMnIF0fA"
|
||||
|
||||
responses.add(responses.GET, f'{root_url}/api/queue/v1/task/{task_id}', **DEFAULT_GTD_CONFIG)
|
||||
responses.add(responses.GET, f"{root_url}/api/queue/v1/task/{task_id}", **DEFAULT_GTD_CONFIG)
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
|
||||
f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
|
||||
**DEFAULT_DA_CONFIG,
|
||||
)
|
||||
|
||||
assert MozciClassification.objects.count() == 0
|
||||
first_job, second_job = test_two_jobs_tc_metadata
|
||||
assert first_job.failure_classification.name == 'not classified'
|
||||
assert second_job.failure_classification.name == 'not classified'
|
||||
assert first_job.failure_classification.name == "not classified"
|
||||
assert second_job.failure_classification.name == "not classified"
|
||||
assert JobNote.objects.count() == 0
|
||||
assert BugJobMap.objects.count() == 0
|
||||
|
||||
FailureClassification.objects.filter(name='autoclassified intermittent').delete()
|
||||
FailureClassification.objects.filter(name="autoclassified intermittent").delete()
|
||||
with pytest.raises(FailureClassification.DoesNotExist) as e:
|
||||
ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
|
||||
ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
|
||||
|
||||
assert str(e.value) == 'FailureClassification matching query does not exist.'
|
||||
assert str(e.value) == "FailureClassification matching query does not exist."
|
||||
|
||||
assert MozciClassification.objects.count() == 1
|
||||
classification = MozciClassification.objects.first()
|
||||
|
@ -356,8 +356,8 @@ def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_m
|
|||
# Did not autoclassify since the requested FailureClassification was not found
|
||||
first_job.refresh_from_db()
|
||||
second_job.refresh_from_db()
|
||||
assert first_job.failure_classification.name == 'not classified'
|
||||
assert second_job.failure_classification.name == 'not classified'
|
||||
assert first_job.failure_classification.name == "not classified"
|
||||
assert second_job.failure_classification.name == "not classified"
|
||||
assert JobNote.objects.count() == 0
|
||||
assert BugJobMap.objects.count() == 0
|
||||
|
||||
|
@ -365,19 +365,19 @@ def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_m
|
|||
@responses.activate
|
||||
@pytest.mark.django_db
|
||||
def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
|
||||
root_url = 'https://community-tc.services.mozilla.com'
|
||||
task_id = 'A35mWTRuQmyj88yMnIF0fA'
|
||||
root_url = "https://community-tc.services.mozilla.com"
|
||||
task_id = "A35mWTRuQmyj88yMnIF0fA"
|
||||
|
||||
responses.add(responses.GET, f'{root_url}/api/queue/v1/task/{task_id}', **DEFAULT_GTD_CONFIG)
|
||||
responses.add(responses.GET, f"{root_url}/api/queue/v1/task/{task_id}", **DEFAULT_GTD_CONFIG)
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
|
||||
f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
|
||||
**DEFAULT_DA_CONFIG,
|
||||
)
|
||||
|
||||
assert MozciClassification.objects.count() == 0
|
||||
|
||||
ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
|
||||
ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
|
||||
|
||||
assert MozciClassification.objects.count() == 1
|
||||
classification = MozciClassification.objects.first()
|
||||
|
@ -386,7 +386,7 @@ def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
|
|||
assert classification.task_id == task_id
|
||||
|
||||
autoclassified_intermittent = FailureClassification.objects.get(
|
||||
name='autoclassified intermittent'
|
||||
name="autoclassified intermittent"
|
||||
)
|
||||
first_bug, second_bug = populate_bugscache
|
||||
|
||||
|
@ -407,7 +407,7 @@ def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
|
|||
).exists()
|
||||
maps = BugJobMap.objects.filter(job=second_job)
|
||||
assert maps.count() == 2
|
||||
assert list(maps.values_list('bug_id', flat=True)) == [first_bug.id, second_bug.id]
|
||||
assert list(maps.values_list("bug_id", flat=True)) == [first_bug.id, second_bug.id]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
@ -416,41 +416,41 @@ def test_autoclassify_failures_missing_job(failure_classifications, populate_bug
|
|||
assert BugJobMap.objects.count() == 0
|
||||
|
||||
intermittents = {
|
||||
'group1': [
|
||||
"group1": [
|
||||
{
|
||||
'task_id': 'unknown_task_id',
|
||||
'label': 'unknown_task',
|
||||
"task_id": "unknown_task_id",
|
||||
"label": "unknown_task",
|
||||
# Should be autoclassified if a matching Job exists
|
||||
'autoclassify': True,
|
||||
'tests': ['devtools/client/framework/test/test1.js'],
|
||||
"autoclassify": True,
|
||||
"tests": ["devtools/client/framework/test/test1.js"],
|
||||
}
|
||||
]
|
||||
}
|
||||
with pytest.raises(Job.DoesNotExist) as e:
|
||||
ClassificationLoader().autoclassify_failures(
|
||||
intermittents, FailureClassification.objects.get(name='autoclassified intermittent')
|
||||
intermittents, FailureClassification.objects.get(name="autoclassified intermittent")
|
||||
)
|
||||
|
||||
assert str(e.value) == 'Job matching query does not exist.'
|
||||
assert str(e.value) == "Job matching query does not exist."
|
||||
|
||||
assert JobNote.objects.count() == 0
|
||||
assert BugJobMap.objects.count() == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('existing_classification', [False, True])
|
||||
@pytest.mark.parametrize("existing_classification", [False, True])
|
||||
def test_autoclassify_failures(
|
||||
existing_classification, test_two_jobs_tc_metadata, test_sheriff, populate_bugscache
|
||||
):
|
||||
first_job, second_job = test_two_jobs_tc_metadata
|
||||
assert first_job.failure_classification.name == 'not classified'
|
||||
assert second_job.failure_classification.name == 'not classified'
|
||||
assert first_job.failure_classification.name == "not classified"
|
||||
assert second_job.failure_classification.name == "not classified"
|
||||
assert JobNote.objects.count() == 0
|
||||
assert BugJobMap.objects.count() == 0
|
||||
|
||||
intermittent = FailureClassification.objects.get(name='intermittent')
|
||||
intermittent = FailureClassification.objects.get(name="intermittent")
|
||||
autoclassified_intermittent = FailureClassification.objects.get(
|
||||
name='autoclassified intermittent'
|
||||
name="autoclassified intermittent"
|
||||
)
|
||||
|
||||
if existing_classification:
|
||||
|
@ -463,7 +463,7 @@ def test_autoclassify_failures(
|
|||
assert JobNote.objects.count() == 1
|
||||
|
||||
ClassificationLoader().autoclassify_failures(
|
||||
DEFAULT_DA_CONFIG['json']['failures']['intermittent'], autoclassified_intermittent
|
||||
DEFAULT_DA_CONFIG["json"]["failures"]["intermittent"], autoclassified_intermittent
|
||||
)
|
||||
|
||||
first_bug, second_bug = populate_bugscache
|
||||
|
@ -484,11 +484,11 @@ def test_autoclassify_failures(
|
|||
if existing_classification
|
||||
else autoclassified_intermittent
|
||||
)
|
||||
assert job_note.who == test_sheriff.email if existing_classification else 'autoclassifier'
|
||||
assert job_note.who == test_sheriff.email if existing_classification else "autoclassifier"
|
||||
assert (
|
||||
job_note.text == "Classified by a Sheriff"
|
||||
if existing_classification
|
||||
else 'Autoclassified by mozci bot as an intermittent failure'
|
||||
else "Autoclassified by mozci bot as an intermittent failure"
|
||||
)
|
||||
|
||||
if not existing_classification:
|
||||
|
@ -496,7 +496,7 @@ def test_autoclassify_failures(
|
|||
bug_job_map = BugJobMap.objects.filter(job=first_job).first()
|
||||
assert bug_job_map.job == first_job
|
||||
assert bug_job_map.bug_id == first_bug.id
|
||||
assert bug_job_map.who == 'autoclassifier'
|
||||
assert bug_job_map.who == "autoclassifier"
|
||||
|
||||
# Second job
|
||||
second_job.refresh_from_db()
|
||||
|
@ -506,14 +506,14 @@ def test_autoclassify_failures(
|
|||
job_note = JobNote.objects.filter(job=second_job).first()
|
||||
assert job_note.job == second_job
|
||||
assert job_note.failure_classification == autoclassified_intermittent
|
||||
assert job_note.who == 'autoclassifier'
|
||||
assert job_note.text == 'Autoclassified by mozci bot as an intermittent failure'
|
||||
assert job_note.who == "autoclassifier"
|
||||
assert job_note.text == "Autoclassified by mozci bot as an intermittent failure"
|
||||
|
||||
maps = BugJobMap.objects.filter(job=second_job)
|
||||
assert maps.count() == 2
|
||||
assert list(maps.values_list('job', flat=True)) == [second_job.id, second_job.id]
|
||||
assert list(maps.values_list('bug_id', flat=True)) == [first_bug.id, second_bug.id]
|
||||
assert [m.who for m in maps] == ['autoclassifier', 'autoclassifier']
|
||||
assert list(maps.values_list("job", flat=True)) == [second_job.id, second_job.id]
|
||||
assert list(maps.values_list("bug_id", flat=True)) == [first_bug.id, second_bug.id]
|
||||
assert [m.who for m in maps] == ["autoclassifier", "autoclassifier"]
|
||||
|
||||
assert JobNote.objects.count() == 2
|
||||
assert BugJobMap.objects.count() == 2 if existing_classification else 3
|
||||
|
@ -526,20 +526,20 @@ def test_new_classification(autoland_push, sample_data, test_two_jobs_tc_metadat
|
|||
first_job, second_job = test_two_jobs_tc_metadata
|
||||
artifact1 = sample_data.text_log_summary
|
||||
artifact1["job_id"] = first_job.id
|
||||
artifact1['job_guid'] = first_job.guid
|
||||
artifact1['blob'] = json.dumps(artifact1['blob'])
|
||||
artifact1["job_guid"] = first_job.guid
|
||||
artifact1["blob"] = json.dumps(artifact1["blob"])
|
||||
|
||||
artifact2 = copy.deepcopy(artifact1)
|
||||
artifact2["job_id"] = second_job.id
|
||||
artifact1['job_guid'] = second_job.guid
|
||||
artifact1["job_guid"] = second_job.guid
|
||||
store_job_artifacts([artifact1, artifact2])
|
||||
|
||||
# first is NEW
|
||||
second_job = Job.objects.get(id=1)
|
||||
first_job = Job.objects.get(id=2)
|
||||
assert first_job.failure_classification.name == 'intermittent needs filing'
|
||||
assert first_job.failure_classification.name == "intermittent needs filing"
|
||||
|
||||
# second instance is normal
|
||||
assert second_job.failure_classification.name == 'not classified'
|
||||
assert second_job.failure_classification.name == "not classified"
|
||||
|
||||
# annotate each job and ensure marked as intermittent
|
||||
|
|
|
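
Note: every quote change in this diff comes from Black's string normalization, which the commit enables by dropping skip-string-normalization from pyproject.toml. A minimal sketch of that behavior, assuming the black package is installed (the snippet is illustrative, not part of the commit):

    import black

    # With string normalization on (Black's default), single-quoted string
    # literals are rewritten to double quotes when a file is reformatted.
    mode = black.Mode(line_length=100, string_normalization=True)
    print(black.format_str("x = 'single'\n", mode=mode))  # prints: x = "single"
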
@@ -18,8 +18,8 @@ def test_ingest_single_sample_job(
     assert Job.objects.count() == 1
     job = Job.objects.get(id=1)
     # Ensure we don't inadvertently change the way we generate job-related hashes.
-    assert job.option_collection_hash == '32faaecac742100f7753f0c1d0aa0add01b4046b'
-    assert job.signature.signature == '5bb6ec49547193d8d9274232cd9de61fb4ef2e59'
+    assert job.option_collection_hash == "32faaecac742100f7753f0c1d0aa0add01b4046b"
+    assert job.signature.signature == "5bb6ec49547193d8d9274232cd9de61fb4ef2e59"
 
 
 def test_ingest_all_sample_jobs(
 
@@ -39,13 +39,13 @@ def test_ingest_twice_log_parsing_status_changed(
     verify that nothing changes"""
     job_data = sample_data.job_data[:1]
 
-    job_data[0]['job']['state'] = 'running'
+    job_data[0]["job"]["state"] = "running"
     test_utils.do_job_ingestion(test_repository, job_data, sample_push)
     assert JobLog.objects.count() == 1
     for job_log in JobLog.objects.all():
         job_log.update_status(JobLog.FAILED)
 
-    job_data[0]['job']['state'] = 'completed'
+    job_data[0]["job"]["state"] = "completed"
     test_utils.do_job_ingestion(test_repository, job_data, sample_push)
     assert JobLog.objects.count() == 1
     for job_log in JobLog.objects.all():
 
@@ -65,23 +65,23 @@ def test_ingest_running_to_retry_sample_job(
     store_push_data(test_repository, sample_push)
 
     job_data = copy.deepcopy(sample_data.job_data[:1])
-    job = job_data[0]['job']
-    job_data[0]['revision'] = sample_push[0]['revision']
-    job['state'] = 'running'
-    job['result'] = 'unknown'
+    job = job_data[0]["job"]
+    job_data[0]["revision"] = sample_push[0]["revision"]
+    job["state"] = "running"
+    job["result"] = "unknown"
 
     def _simulate_retry_job(job):
-        job['state'] = 'completed'
-        job['result'] = 'retry'
+        job["state"] = "completed"
+        job["result"] = "retry"
         # convert the job_guid to what it would be on a retry
-        job['job_guid'] = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
+        job["job_guid"] = job["job_guid"] + "_" + str(job["end_timestamp"])[-5:]
         return job
 
     if same_ingestion_cycle:
         # now we simulate the complete version of the job coming in (on the
         # same push)
         new_job_datum = copy.deepcopy(job_data[0])
-        new_job_datum['job'] = _simulate_retry_job(new_job_datum['job'])
+        new_job_datum["job"] = _simulate_retry_job(new_job_datum["job"])
         job_data.append(new_job_datum)
         store_job_data(test_repository, job_data)
     else:
 
@@ -95,9 +95,9 @@ def test_ingest_running_to_retry_sample_job(
 
     assert Job.objects.count() == 1
     job = Job.objects.get(id=1)
-    assert job.result == 'retry'
+    assert job.result == "retry"
     # guid should be the retry one
-    assert job.guid == job_data[-1]['job']['job_guid']
+    assert job.guid == job_data[-1]["job"]["job_guid"]
 
 
 @pytest.mark.parametrize(
 
@@ -115,29 +115,29 @@ def test_ingest_running_to_retry_to_success_sample_job(
     store_push_data(test_repository, sample_push)
 
     job_datum = copy.deepcopy(sample_data.job_data[0])
-    job_datum['revision'] = sample_push[0]['revision']
+    job_datum["revision"] = sample_push[0]["revision"]
 
-    job = job_datum['job']
-    job_guid_root = job['job_guid']
+    job = job_datum["job"]
+    job_guid_root = job["job_guid"]
 
     job_data = []
     for state, result, job_guid in [
-        ('running', 'unknown', job_guid_root),
-        ('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
-        ('completed', 'success', job_guid_root),
+        ("running", "unknown", job_guid_root),
+        ("completed", "retry", job_guid_root + "_" + str(job["end_timestamp"])[-5:]),
+        ("completed", "success", job_guid_root),
     ]:
         new_job_datum = copy.deepcopy(job_datum)
-        new_job_datum['job']['state'] = state
-        new_job_datum['job']['result'] = result
-        new_job_datum['job']['job_guid'] = job_guid
+        new_job_datum["job"]["state"] = state
+        new_job_datum["job"]["result"] = result
+        new_job_datum["job"]["job_guid"] = job_guid
         job_data.append(new_job_datum)
 
     for i, j in ingestion_cycles:
         store_job_data(test_repository, job_data[i:j])
 
     assert Job.objects.count() == 2
-    assert Job.objects.get(id=1).result == 'retry'
-    assert Job.objects.get(id=2).result == 'success'
+    assert Job.objects.get(id=1).result == "retry"
+    assert Job.objects.get(id=2).result == "success"
     assert JobLog.objects.count() == 2
 
 
@@ -159,22 +159,22 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
     store_push_data(test_repository, sample_push)
 
     job_datum = copy.deepcopy(sample_data.job_data[0])
-    job_datum['revision'] = sample_push[0]['revision']
+    job_datum["revision"] = sample_push[0]["revision"]
 
-    job = job_datum['job']
-    job_guid_root = job['job_guid']
+    job = job_datum["job"]
+    job_guid_root = job["job_guid"]
 
     job_data = []
     for state, result, job_guid in [
-        ('running', 'unknown', job_guid_root),
-        ('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
-        ('completed', 'retry', job_guid_root + "_12345"),
-        ('completed', 'success', job_guid_root),
+        ("running", "unknown", job_guid_root),
+        ("completed", "retry", job_guid_root + "_" + str(job["end_timestamp"])[-5:]),
+        ("completed", "retry", job_guid_root + "_12345"),
+        ("completed", "success", job_guid_root),
     ]:
         new_job_datum = copy.deepcopy(job_datum)
-        new_job_datum['job']['state'] = state
-        new_job_datum['job']['result'] = result
-        new_job_datum['job']['job_guid'] = job_guid
+        new_job_datum["job"]["state"] = state
+        new_job_datum["job"]["result"] = result
+        new_job_datum["job"]["job_guid"] = job_guid
         job_data.append(new_job_datum)
 
     for i, j in ingestion_cycles:
 
@@ -182,9 +182,9 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
         store_job_data(test_repository, ins)
 
     assert Job.objects.count() == 3
-    assert Job.objects.get(id=1).result == 'retry'
-    assert Job.objects.get(id=2).result == 'retry'
-    assert Job.objects.get(id=3).result == 'success'
+    assert Job.objects.get(id=1).result == "retry"
+    assert Job.objects.get(id=2).result == "retry"
+    assert Job.objects.get(id=3).result == "success"
    assert JobLog.objects.count() == 3
 
 
@@ -193,23 +193,23 @@ def test_ingest_retry_sample_job_no_running(
 ):
     """Process a single job structure in the job_data.txt file"""
     job_data = copy.deepcopy(sample_data.job_data[:1])
-    job = job_data[0]['job']
-    job_data[0]['revision'] = sample_push[0]['revision']
+    job = job_data[0]["job"]
+    job_data[0]["revision"] = sample_push[0]["revision"]
 
     store_push_data(test_repository, sample_push)
 
     # complete version of the job coming in
-    job['state'] = 'completed'
-    job['result'] = 'retry'
+    job["state"] = "completed"
+    job["result"] = "retry"
     # convert the job_guid to what it would be on a retry
-    retry_guid = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
-    job['job_guid'] = retry_guid
+    retry_guid = job["job_guid"] + "_" + str(job["end_timestamp"])[-5:]
+    job["job_guid"] = retry_guid
 
     store_job_data(test_repository, job_data)
 
     assert Job.objects.count() == 1
     job = Job.objects.get(id=1)
-    assert job.result == 'retry'
+    assert job.result == "retry"
     assert job.guid == retry_guid
 
 
@@ -220,7 +220,7 @@ def test_bad_date_value_ingestion(
     Test ingesting a job blob with bad date value
 
     """
-    blob = job_data(start_timestamp="foo", revision=sample_push[0]['revision'])
+    blob = job_data(start_timestamp="foo", revision=sample_push[0]["revision"])
 
     store_push_data(test_repository, sample_push[:1])
     store_job_data(test_repository, [blob])
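
Note: several of the ingestion tests above derive a retry guid by appending the last five digits of the job's end timestamp to the original guid. The convention extracted as a standalone sketch (the helper name is illustrative, not Treeherder's API):

    def simulate_retry_guid(job_guid: str, end_timestamp: int) -> str:
        # A retried task reuses the original guid plus a suffix taken from
        # the last five digits of its end timestamp.
        return job_guid + "_" + str(end_timestamp)[-5:]

    assert simulate_retry_guid("abc123", 1680000012345) == "abc123_12345"
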
@@ -45,7 +45,7 @@ def mock_artifact(taskId, runId, artifactName):
         responses.GET,
         baseUrl.format(taskId=taskId, runId=runId, artifactName=artifactName),
         body="",
-        content_type='text/plain',
+        content_type="text/plain",
         status=200,
     )
 
 
@@ -100,7 +100,7 @@ def test_new_job_transformation(new_pulse_jobs, new_transformed_jobs, failure_classifications
         (decoded_task_id, _) = job_guid.split("/")
         # As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
         taskId = slugid.encode(uuid.UUID(decoded_task_id))
-        transformed_job = jl.process_job(message, 'https://firefox-ci-tc.services.mozilla.com')
+        transformed_job = jl.process_job(message, "https://firefox-ci-tc.services.mozilla.com")
         # Not all messages from Taskcluster will be processed
         if transformed_job:
             assert new_transformed_jobs[taskId] == transformed_job
 
@@ -117,18 +117,18 @@ def test_ingest_pulse_jobs(
     revision = push_stored[0]["revision"]
     for job in pulse_jobs:
         job["origin"]["revision"] = revision
-        jl.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+        jl.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
 
     jobs = Job.objects.all()
     assert len(jobs) == 5
 
     assert [job.taskcluster_metadata for job in jobs]
-    assert set(TaskclusterMetadata.objects.values_list('task_id', flat=True)) == set(
+    assert set(TaskclusterMetadata.objects.values_list("task_id", flat=True)) == set(
         [
-            'IYyscnNMTLuxzna7PNqUJQ',
-            'XJCbbRQ6Sp-UL1lL-tw5ng',
-            'ZsSzJQu3Q7q2MfehIBAzKQ',
-            'bIzVZt9jQQKgvQYD3a2HQw',
+            "IYyscnNMTLuxzna7PNqUJQ",
+            "XJCbbRQ6Sp-UL1lL-tw5ng",
+            "ZsSzJQu3Q7q2MfehIBAzKQ",
+            "bIzVZt9jQQKgvQYD3a2HQw",
         ]
     )
 
 
@@ -165,7 +165,7 @@ def test_ingest_pulse_job_with_long_job_type_name(
         "jobName"
     ] = "this is a very long string that exceeds the 100 character size that was the previous limit by just a little bit"
     job["origin"]["revision"] = revision
-    jl.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+    jl.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
 
     jobs = Job.objects.all()
     assert len(jobs) == 1
 
@@ -184,14 +184,14 @@ def test_ingest_pending_pulse_job(
     revision = push_stored[0]["revision"]
     pulse_job["origin"]["revision"] = revision
     pulse_job["state"] = "pending"
-    jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+    jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
 
     jobs = Job.objects.all()
     assert len(jobs) == 1
 
     job = jobs[0]
     assert job.taskcluster_metadata
-    assert job.taskcluster_metadata.task_id == 'IYyscnNMTLuxzna7PNqUJQ'
+    assert job.taskcluster_metadata.task_id == "IYyscnNMTLuxzna7PNqUJQ"
 
     # should not have processed any log or details for pending jobs
     assert JobLog.objects.count() == 2
 
@@ -211,7 +211,7 @@ def test_ingest_pulse_jobs_bad_project(
     job["origin"]["project"] = "ferd"
 
     for pulse_job in pulse_jobs:
-        jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+        jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
 
     # length of pulse jobs is 5, so one will be skipped due to bad project
     assert Job.objects.count() == 4
 
@@ -230,13 +230,13 @@ def test_ingest_pulse_jobs_with_missing_push(pulse_jobs):
         responses.GET,
         "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/IYyscnNMTLuxzna7PNqUJQ",
         json={},
-        content_type='application/json',
+        content_type="application/json",
         status=200,
     )
 
     with pytest.raises(ObjectDoesNotExist):
         for pulse_job in pulse_jobs:
-            jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+            jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
 
     # if one job isn't ready, except on the whole batch. They'll retry as a
     # task after the timeout.
 
@@ -300,7 +300,7 @@ def test_transition_pending_retry_fail_stays_retry(
 def test_skip_unscheduled(first_job, failure_classifications, mock_log_parser):
     jl = JobLoader()
     first_job["state"] = "unscheduled"
-    jl.process_job(first_job, 'https://firefox-ci-tc.services.mozilla.com')
+    jl.process_job(first_job, "https://firefox-ci-tc.services.mozilla.com")
 
     assert not Job.objects.count()
 
 
@@ -310,10 +310,10 @@ def change_state_result(test_job, job_loader, new_state, new_result, exp_state,
     job = copy.deepcopy(test_job)
     job["state"] = new_state
     job["result"] = new_result
-    if new_state == 'pending':
+    if new_state == "pending":
         # pending jobs wouldn't have logs and our store_job_data doesn't
         # support it.
-        del job['logs']
+        del job["logs"]
         errorsummary_indices = [
             i
             for i, item in enumerate(job["jobInfo"].get("links", []))
 
@@ -322,7 +322,7 @@ def change_state_result(test_job, job_loader, new_state, new_result, exp_state,
         for index in errorsummary_indices:
             del job["jobInfo"]["links"][index]
 
    job_loader.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
-    job_loader.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+    job_loader.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
 
     assert Job.objects.count() == 1
     job = Job.objects.get(id=1)
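
Note: the job loader tests above lean on the responses library to stub Taskcluster queue endpoints so process_job() never touches the network. The same pattern in a self-contained sketch (URL and task id copied from the tests; the fetch helper itself is illustrative):

    import requests
    import responses

    ROOT_URL = "https://firefox-ci-tc.services.mozilla.com"

    @responses.activate
    def fetch_task_definition(task_id):
        # Register a canned JSON reply for the queue endpoint, then call it.
        responses.add(
            responses.GET,
            f"{ROOT_URL}/api/queue/v1/task/{task_id}",
            json={},
            content_type="application/json",
            status=200,
        )
        return requests.get(f"{ROOT_URL}/api/queue/v1/task/{task_id}").json()

    print(fetch_task_definition("IYyscnNMTLuxzna7PNqUJQ"))  # {}
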
@@ -7,7 +7,7 @@ from treeherder.etl.schema import get_json_schema
 # production Treeherder
 
 
-@pytest.mark.parametrize("group_symbol", ['?', 'A', 'Aries', 'Buri/Hamac', 'L10n', 'M-e10s'])
+@pytest.mark.parametrize("group_symbol", ["?", "A", "Aries", "Buri/Hamac", "L10n", "M-e10s"])
 def test_group_symbols(sample_data, group_symbol):
     """
     Validate jobs against the schema with different group_symbol values
 
@@ -19,7 +19,7 @@ def test_group_symbols(sample_data, group_symbol):
         jsonschema.validate(job, get_json_schema("pulse-job.yml"))
 
 
-@pytest.mark.parametrize("job_symbol", ['1.1g', '1g', '20', 'A', 'GBI10', 'en-US-1'])
+@pytest.mark.parametrize("job_symbol", ["1.1g", "1g", "20", "A", "GBI10", "en-US-1"])
 def test_job_symbols(sample_data, job_symbol):
     """
     Validate jobs against the schema with different job_symbol values
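
Note: the schema tests above call jsonschema.validate() against the pulse-job.yml schema. The same call works with any schema document, e.g. (the inline schema here is illustrative, not the real pulse-job schema):

    import jsonschema

    schema = {"type": "object", "properties": {"jobSymbol": {"type": "string"}}}
    jsonschema.validate({"jobSymbol": "GBI10"}, schema)  # raises ValidationError on mismatch
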
@@ -6,17 +6,17 @@ from treeherder.model.models import TextLogError
 
 def test_load_textlog_summary_twice(test_repository, test_job):
     text_log_summary_artifact = {
-        'type': 'json',
-        'name': 'text_log_summary',
-        'blob': json.dumps(
+        "type": "json",
+        "name": "text_log_summary",
+        "blob": json.dumps(
             {
-                'errors': [
-                    {"line": 'WARNING - foobar', "linenumber": 1587},
-                    {"line": 'WARNING - foobar', "linenumber": 1590},
+                "errors": [
+                    {"line": "WARNING - foobar", "linenumber": 1587},
+                    {"line": "WARNING - foobar", "linenumber": 1590},
                 ],
             }
         ),
-        'job_guid': test_job.guid,
+        "job_guid": test_job.guid,
     }
 
     store_job_artifacts([text_log_summary_artifact])
 
@@ -29,29 +29,29 @@ def test_load_textlog_summary_twice(test_repository, test_job):
 
 def test_load_non_ascii_textlog_errors(test_job):
     text_log_summary_artifact = {
-        'type': 'json',
-        'name': 'text_log_summary',
-        'blob': json.dumps(
+        "type": "json",
+        "name": "text_log_summary",
+        "blob": json.dumps(
             {
-                'errors': [
+                "errors": [
                     {
                         # non-ascii character
-                        "line": '07:51:28 WARNING - \U000000c3',
+                        "line": "07:51:28 WARNING - \U000000c3",
                         "linenumber": 1587,
                     },
                     {
                         # astral character (i.e. higher than ucs2)
-                        "line": '07:51:29 WARNING - \U0001d400',
+                        "line": "07:51:29 WARNING - \U0001d400",
                         "linenumber": 1588,
                     },
                 ],
             }
         ),
-        'job_guid': test_job.guid,
+        "job_guid": test_job.guid,
     }
 
     store_job_artifacts([text_log_summary_artifact])
 
     assert TextLogError.objects.count() == 2
-    assert TextLogError.objects.get(line_number=1587).line == '07:51:28 WARNING - \U000000c3'
-    assert TextLogError.objects.get(line_number=1588).line == '07:51:29 WARNING - <U+01D400>'
+    assert TextLogError.objects.get(line_number=1587).line == "07:51:28 WARNING - \U000000c3"
+    assert TextLogError.objects.get(line_number=1588).line == "07:51:29 WARNING - <U+01D400>"
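
Note: the non-ascii test above expects the astral character U+1D400 to be stored as the placeholder "<U+01D400>" while characters inside the Basic Multilingual Plane pass through untouched. One way to express that substitution (illustrative only; not necessarily Treeherder's actual filter):

    def astral_filter(text: str) -> str:
        # Replace characters above the BMP with a <U+XXXXXX> placeholder;
        # everything else is kept verbatim.
        return "".join(
            ch if ord(ch) <= 0xFFFF else "<U+{:06X}>".format(ord(ch)) for ch in text
        )

    assert astral_filter("07:51:29 WARNING - \U0001d400") == "07:51:29 WARNING - <U+01D400>"
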
@@ -19,16 +19,16 @@ from treeherder.perf.models import (
 
 def sample_perf_datum(framework_name: str, subtest_value: int = 20.0) -> dict:
     return {
-        'job_guid': 'fake_job_guid',
-        'name': 'test',
-        'type': 'test',
-        'blob': {
-            'framework': {'name': framework_name},
-            'suites': [
+        "job_guid": "fake_job_guid",
+        "name": "test",
+        "type": "test",
+        "blob": {
+            "framework": {"name": framework_name},
+            "suites": [
                 {
-                    'name': "some-perf-suite",
-                    'unit': "ms",
-                    'subtests': [{'name': "some-perf-test", 'value': subtest_value, 'unit': 'ms'}],
+                    "name": "some-perf-suite",
+                    "unit": "ms",
+                    "subtests": [{"name": "some-perf-test", "value": subtest_value, "unit": "ms"}],
                 }
             ],
         },
 
@@ -60,33 +60,33 @@ def _generate_and_validate_alerts(
         "some-perf-framework",
         "some-perf-suite",
         "some-perf-test",
-        'my_option_hash',
-        'my_platform',
+        "my_option_hash",
+        "my_platform",
         True,
         None,
-        'ms',
-        alert_threshold=extra_subtest_metadata.get('alertThreshold'),
-        alert_change_type=extra_subtest_metadata.get('alertChangeType'),
-        min_back_window=extra_subtest_metadata.get('minBackWindow'),
-        max_back_window=extra_subtest_metadata.get('maxBackWindow'),
-        fore_window=extra_subtest_metadata.get('foreWindow'),
+        "ms",
+        alert_threshold=extra_subtest_metadata.get("alertThreshold"),
+        alert_change_type=extra_subtest_metadata.get("alertChangeType"),
+        min_back_window=extra_subtest_metadata.get("minBackWindow"),
+        max_back_window=extra_subtest_metadata.get("maxBackWindow"),
+        fore_window=extra_subtest_metadata.get("foreWindow"),
     )
     if suite_provides_value:
         _verify_signature(
             test_repository.name,
             "some-perf-framework",
             "some-perf-suite",
-            '',
-            'my_option_hash',
-            'my_platform',
+            "",
+            "my_option_hash",
+            "my_platform",
             True,
             None,
-            'ms',
-            alert_threshold=extra_suite_metadata.get('alertThreshold'),
-            alert_change_type=extra_suite_metadata.get('alertChangeType'),
-            min_back_window=extra_suite_metadata.get('minBackWindow'),
-            max_back_window=extra_suite_metadata.get('maxBackWindow'),
-            fore_window=extra_suite_metadata.get('foreWindow'),
+            "ms",
+            alert_threshold=extra_suite_metadata.get("alertThreshold"),
+            alert_change_type=extra_suite_metadata.get("alertChangeType"),
+            min_back_window=extra_suite_metadata.get("minBackWindow"),
+            max_back_window=extra_suite_metadata.get("maxBackWindow"),
+            fore_window=extra_suite_metadata.get("foreWindow"),
         )
 
 
@@ -125,15 +125,15 @@ def _generate_perf_data_range(
         datum = sample_perf_datum(framework_name, value)
 
         if suite_provides_value:
-            datum['blob']['suites'][0]['value'] = value
+            datum["blob"]["suites"][0]["value"] = value
         if extra_suite_metadata:
-            datum['blob']['suites'][0].update(extra_suite_metadata)
+            datum["blob"]["suites"][0].update(extra_suite_metadata)
         if extra_subtest_metadata:
-            datum['blob']['suites'][0]['subtests'][0].update(extra_subtest_metadata)
+            datum["blob"]["suites"][0]["subtests"][0].update(extra_subtest_metadata)
 
         # the perf data adapter expects deserialized performance data
         submit_datum = copy.copy(datum)
-        submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
+        submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
         store_performance_artifact(job, submit_datum)
 
 
@@ -155,9 +155,9 @@ def _verify_signature(
     fore_window=None,
 ):
     if not extra_opts:
-        extra_options = ''
+        extra_options = ""
     else:
-        extra_options = ' '.join(sorted(extra_opts))
+        extra_options = " ".join(sorted(extra_opts))
 
     repository = Repository.objects.get(name=repo_name)
     signature = PerformanceSignature.objects.get(suite=suite_name, test=test_name)
 
@@ -199,7 +199,7 @@ def test_same_signature_multiple_performance_frameworks(test_repository, perf_job
 
     # the perf data adapter expects deserialized performance data
     submit_datum = copy.copy(datum)
-    submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
+    submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
 
     store_performance_artifact(perf_job, submit_datum)
 
 
@@ -218,36 +218,36 @@ def test_same_signature_multiple_performance_frameworks(test_repository, perf_job
 
 @pytest.mark.parametrize(
     (
-        'alerts_enabled_repository',
-        'suite_provides_value',
-        'extra_suite_metadata',
-        'extra_subtest_metadata',
-        'job_tier',
-        'expected_subtest_alert',
-        'expected_suite_alert',
+        "alerts_enabled_repository",
+        "suite_provides_value",
+        "extra_suite_metadata",
+        "extra_subtest_metadata",
+        "job_tier",
+        "expected_subtest_alert",
+        "expected_suite_alert",
     ),
     [
         # should still alert even if we optionally
        # use a large maximum back window
-        (True, False, None, {'minBackWindow': 12, 'maxBackWindow': 100}, 2, True, False),
+        (True, False, None, {"minBackWindow": 12, "maxBackWindow": 100}, 2, True, False),
         # summary+subtest, no metadata, default settings
         (True, True, {}, {}, 1, False, True),
         # summary+subtest, no metadata, no alerting on
         # summary, alerting on subtest
-        (True, True, {'shouldAlert': False}, {'shouldAlert': True}, 2, True, False),
+        (True, True, {"shouldAlert": False}, {"shouldAlert": True}, 2, True, False),
         # summary+subtest, no metadata on summary, alerting
         # override on subtest
-        (True, True, {}, {'shouldAlert': True}, 2, True, True),
+        (True, True, {}, {"shouldAlert": True}, 2, True, True),
         # summary+subtest, alerting override on subtest +
         # summary
-        (True, True, {'shouldAlert': True}, {'shouldAlert': True}, 1, True, True),
+        (True, True, {"shouldAlert": True}, {"shouldAlert": True}, 1, True, True),
         # summary + subtest, only subtest is absolute so
         # summary should alert
         (
             True,
             True,
-            {'shouldAlert': True},
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
+            {"shouldAlert": True},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
             2,
             False,
             True,
 
@@ -292,9 +292,9 @@ def test_alerts_should_be_generated(
 
     if expected_suite_alert:
         # validate suite alert
-        alert = PerformanceAlert.objects.get(series_signature__test='')
+        alert = PerformanceAlert.objects.get(series_signature__test="")
         assert alert.series_signature.suite == "some-perf-suite"
-        assert alert.series_signature.test == ''
+        assert alert.series_signature.test == ""
         assert alert.is_regression
         assert alert.amount_abs == 1
         assert alert.amount_pct == 100
 
@@ -311,76 +311,76 @@ def test_alerts_should_be_generated(
 
 @pytest.mark.parametrize(
     (
-        'alerts_enabled_repository',
-        'suite_provides_value',
-        'extra_suite_metadata',
-        'extra_subtest_metadata',
-        'job_tier',
+        "alerts_enabled_repository",
+        "suite_provides_value",
+        "extra_suite_metadata",
+        "extra_subtest_metadata",
+        "job_tier",
     ),
     [
         # just subtest, no metadata, default settings & non sheriff-able job tier won't alert
         (True, False, None, {}, 3),
         # just subtest, high alert threshold (so no alert)
-        (True, False, None, {'alertThreshold': 500.0}, 2),
+        (True, False, None, {"alertThreshold": 500.0}, 2),
         # non sheriff-able job tier won't alert either
-        (True, False, None, {'alertThreshold': 500.0}, 3),
+        (True, False, None, {"alertThreshold": 500.0}, 3),
         # just subtest, but larger min window size
         # (so no alerting)
-        (True, False, {}, {'minBackWindow': 100, 'maxBackWindow': 100}, 1),
+        (True, False, {}, {"minBackWindow": 100, "maxBackWindow": 100}, 1),
         # non sheriff-able job tier won't alert either
-        (True, False, {}, {'minBackWindow': 100, 'maxBackWindow': 100}, 3),
+        (True, False, {}, {"minBackWindow": 100, "maxBackWindow": 100}, 3),
         # should still alert even if we optionally
         # use a large maximum back window, but because of
         # non sheriff-able job tier it won't
-        (True, False, None, {'minBackWindow': 12, 'maxBackWindow': 100}, 3),
+        (True, False, None, {"minBackWindow": 12, "maxBackWindow": 100}, 3),
         # summary+subtest, no metadata, default settings should alert,
         # but because of non sheriff-able job tier it won't
         (True, True, {}, {}, 3),
         # summary+subtest, high alert threshold
         # (so no alert)
-        (True, True, {'alertThreshold': 500.0}, {}, 2),
+        (True, True, {"alertThreshold": 500.0}, {}, 2),
         # non sheriff-able job tier won't alert either
-        (True, True, {'alertThreshold': 500.0}, {}, 3),
+        (True, True, {"alertThreshold": 500.0}, {}, 3),
         # non sheriff-able job tier won't alert
-        (True, True, {'alertThreshold': 500.0}, {}, 2),
+        (True, True, {"alertThreshold": 500.0}, {}, 2),
         # non sheriff-able job tier won't alert either
-        (True, True, {'alertThreshold': 500.0}, {}, 3),
+        (True, True, {"alertThreshold": 500.0}, {}, 3),
         # summary+subtest, no metadata, no alerting on summary
-        (True, True, {'shouldAlert': False}, {}, 1),
+        (True, True, {"shouldAlert": False}, {}, 1),
         # non sheriff-able job tier won't alert either
-        (True, True, {'shouldAlert': False}, {}, 3),
+        (True, True, {"shouldAlert": False}, {}, 3),
         # summary+subtest, no metadata, no alerting on
         # summary, alerting on subtest should alert, but
         # because of non sheriff-able job tier it won't
-        (True, True, {'shouldAlert': False}, {'shouldAlert': True}, 3),
+        (True, True, {"shouldAlert": False}, {"shouldAlert": True}, 3),
        # summary+subtest, no metadata on summary, alerting
         # override on subtest should alert, but because of
         # non sheriff-able job tier it won't
-        (True, True, {}, {'shouldAlert': True}, 3),
+        (True, True, {}, {"shouldAlert": True}, 3),
         # summary+subtest, alerting override on subtest +
         # summary & non sheriff-able job tier won't alert
-        (True, True, {'shouldAlert': True}, {'shouldAlert': True}, 3),
+        (True, True, {"shouldAlert": True}, {"shouldAlert": True}, 3),
         # summary+subtest, alerting override on subtest +
         # summary -- but alerts disabled
-        (False, True, {'shouldAlert': True}, {'shouldAlert': True}, 2),
+        (False, True, {"shouldAlert": True}, {"shouldAlert": True}, 2),
         # non sheriff-able job tier won't alert either
-        (False, True, {'shouldAlert': True}, {'shouldAlert': True}, 3),
+        (False, True, {"shouldAlert": True}, {"shouldAlert": True}, 3),
         # summary+subtest, alerting override on subtest +
         # summary, but using absolute change so shouldn't
         # alert
         (
             True,
             True,
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
             1,
         ),
         # non sheriff-able job tier won't alert either
         (
             True,
             True,
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
             3,
         ),
         # summary + subtest, only subtest is absolute so
 
@@ -389,8 +389,8 @@ def test_alerts_should_be_generated(
         (
             True,
             True,
-            {'shouldAlert': True},
-            {'shouldAlert': True, 'alertChangeType': 'absolute'},
+            {"shouldAlert": True},
+            {"shouldAlert": True, "alertChangeType": "absolute"},
             3,
         ),
     ],
 
@@ -449,4 +449,4 @@ def test_last_updated(
     _generate_perf_data_range(test_repository, generic_reference_data, reverse_push_range=True)
     assert PerformanceSignature.objects.count() == 1
     signature = PerformanceSignature.objects.first()
-    assert signature.last_updated == max(Push.objects.values_list('time', flat=True))
+    assert signature.last_updated == max(Push.objects.values_list("time", flat=True))
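
Note: throughout the perf tests, a datum is made submittable by serializing its blob under a "performance_data" key before calling store_performance_artifact(). The recurring two lines extracted as a standalone helper (the function name is illustrative):

    import copy
    import json

    def to_submittable(datum: dict) -> dict:
        # store_performance_artifact() expects the blob as a JSON string
        # wrapped under "performance_data", as done throughout these tests.
        submit_datum = copy.copy(datum)
        submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
        return submit_datum
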
@@ -21,65 +21,65 @@ from treeherder.perf.models import (
     PerformanceSignature,
 )
 
-FRAMEWORK_NAME = 'browsertime'
-MEASUREMENT_UNIT = 'ms'
-UPDATED_MEASUREMENT_UNIT = 'seconds'
+FRAMEWORK_NAME = "browsertime"
+MEASUREMENT_UNIT = "ms"
+UPDATED_MEASUREMENT_UNIT = "seconds"
 DATA_PER_ARTIFACT = 8  # related to sample_perf_artifact fixture
 
 
 @pytest.fixture
 def sample_perf_artifact() -> dict:
     return {
-        'job_guid': 'fake_job_guid',
-        'name': 'test',
-        'type': 'test',
-        'blob': {
-            'framework': {'name': FRAMEWORK_NAME},
-            'suites': [
+        "job_guid": "fake_job_guid",
+        "name": "test",
+        "type": "test",
+        "blob": {
+            "framework": {"name": FRAMEWORK_NAME},
+            "suites": [
                 {
-                    'name': 'youtube-watch',
-                    'extraOptions': ['shell', 'e10s'],
-                    'lowerIsBetter': True,
-                    'value': 10.0,
-                    'unit': MEASUREMENT_UNIT,
-                    'subtests': [
+                    "name": "youtube-watch",
+                    "extraOptions": ["shell", "e10s"],
+                    "lowerIsBetter": True,
+                    "value": 10.0,
+                    "unit": MEASUREMENT_UNIT,
+                    "subtests": [
                         {
-                            'name': 'fcp',
-                            'value': 20.0,
-                            'unit': MEASUREMENT_UNIT,
-                            'lowerIsBetter': True,
+                            "name": "fcp",
+                            "value": 20.0,
+                            "unit": MEASUREMENT_UNIT,
+                            "lowerIsBetter": True,
                         },
                         {
-                            'name': 'loadtime',
-                            'value': 30.0,
-                            'unit': MEASUREMENT_UNIT,
-                            'lowerIsBetter': False,
+                            "name": "loadtime",
+                            "value": 30.0,
+                            "unit": MEASUREMENT_UNIT,
+                            "lowerIsBetter": False,
                         },
                         {
-                            'name': 'fnbpaint',
-                            'value': 40.0,
-                            'unit': MEASUREMENT_UNIT,
+                            "name": "fnbpaint",
+                            "value": 40.0,
+                            "unit": MEASUREMENT_UNIT,
                         },
                     ],
                 },
                 {
-                    'name': 'youtube-watch 2',
-                    'lowerIsBetter': False,
-                    'value': 10.0,
-                    'unit': MEASUREMENT_UNIT,
-                    'subtests': [
+                    "name": "youtube-watch 2",
+                    "lowerIsBetter": False,
+                    "value": 10.0,
+                    "unit": MEASUREMENT_UNIT,
+                    "subtests": [
                         {
-                            'name': 'fcp',
-                            'value': 20.0,
-                            'unit': MEASUREMENT_UNIT,
+                            "name": "fcp",
+                            "value": 20.0,
+                            "unit": MEASUREMENT_UNIT,
                         }
                     ],
                 },
                 {
-                    'name': 'youtube-watch 3',
-                    'value': 10.0,
-                    'unit': MEASUREMENT_UNIT,
-                    'subtests': [{'name': 'fcp', 'value': 20.0, 'unit': MEASUREMENT_UNIT}],
+                    "name": "youtube-watch 3",
+                    "value": 10.0,
+                    "unit": MEASUREMENT_UNIT,
+                    "subtests": [{"name": "fcp", "value": 20.0, "unit": MEASUREMENT_UNIT}],
                 },
             ],
         },
 
@@ -95,14 +95,14 @@ def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
         mocked_push_timestamp = (
             datetime.datetime.utcnow() + datetime.timedelta(hours=idx)
         ).timestamp()
-        artifact['blob']['pushTimestamp'] = int(mocked_push_timestamp)
+        artifact["blob"]["pushTimestamp"] = int(mocked_push_timestamp)
 
         # having distinct values for suites & subtests
         # will make it easier to write tests
-        for suite in artifact['blob']['suites']:
-            suite['value'] = suite['value'] + idx
-            for subtest in suite['subtests']:
-                subtest['value'] = subtest['value'] + idx
+        for suite in artifact["blob"]["suites"]:
+            suite["value"] = suite["value"] + idx
+            for subtest in suite["subtests"]:
+                subtest["value"] = subtest["value"] + idx
 
     return artifacts
 
 
@@ -110,35 +110,35 @@ def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
 @pytest.fixture
 def sample_perf_artifact_with_new_unit():
     return {
-        'job_guid': 'new_fake_job_guid',
-        'name': 'test',
-        'type': 'test',
-        'blob': {
-            'framework': {'name': FRAMEWORK_NAME},
-            'suites': [
+        "job_guid": "new_fake_job_guid",
+        "name": "test",
+        "type": "test",
+        "blob": {
+            "framework": {"name": FRAMEWORK_NAME},
+            "suites": [
                 {
-                    'name': 'youtube-watch',
-                    'extraOptions': ['shell', 'e10s'],
-                    'lowerIsBetter': True,
-                    'value': 10.0,
-                    'unit': UPDATED_MEASUREMENT_UNIT,
-                    'subtests': [
+                    "name": "youtube-watch",
+                    "extraOptions": ["shell", "e10s"],
+                    "lowerIsBetter": True,
+                    "value": 10.0,
+                    "unit": UPDATED_MEASUREMENT_UNIT,
+                    "subtests": [
                         {
-                            'name': 'fcp',
-                            'value': 20.0,
-                            'unit': UPDATED_MEASUREMENT_UNIT,
-                            'lowerIsBetter': True,
+                            "name": "fcp",
+                            "value": 20.0,
+                            "unit": UPDATED_MEASUREMENT_UNIT,
+                            "lowerIsBetter": True,
                         },
                         {
-                            'name': 'loadtime',
-                            'value': 30.0,
-                            'unit': MEASUREMENT_UNIT,
-                            'lowerIsBetter': False,
+                            "name": "loadtime",
+                            "value": 30.0,
+                            "unit": MEASUREMENT_UNIT,
+                            "lowerIsBetter": False,
                         },
                         {
-                            'name': 'fnbpaint',
-                            'value': 40.0,
-                            'unit': MEASUREMENT_UNIT,
+                            "name": "fnbpaint",
+                            "value": 40.0,
+                            "unit": MEASUREMENT_UNIT,
                         },
                     ],
                 }
 
@@ -152,8 +152,8 @@ def later_perf_push(test_repository):
     later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
     return Push.objects.create(
         repository=test_repository,
-        revision='1234abcd12',
-        author='foo@bar.com',
+        revision="1234abcd12",
+        author="foo@bar.com",
         time=later_timestamp,
     )
 
 
@@ -170,18 +170,18 @@ def _prepare_test_data(datum):
     PerformanceFramework.objects.get_or_create(name=FRAMEWORK_NAME, enabled=True)
     # the perf data adapter expects unserialized performance data
     submit_datum = copy.copy(datum)
-    submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
-    perf_datum = datum['blob']
+    submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
+    perf_datum = datum["blob"]
     return perf_datum, submit_datum
 
 
 def _assert_hash_remains_unchanged():
-    summary_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='')
+    summary_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="")
     # Ensure we don't inadvertently change the way we generate signature hashes.
-    assert summary_signature.signature_hash == '78aaeaf7d3a0170f8a1fb0c4dc34ca276da47e1c'
+    assert summary_signature.signature_hash == "78aaeaf7d3a0170f8a1fb0c4dc34ca276da47e1c"
     subtest_signatures = PerformanceSignature.objects.filter(
         parent_signature=summary_signature
-    ).values_list('signature_hash', flat=True)
+    ).values_list("signature_hash", flat=True)
     assert len(subtest_signatures) == 3
 
 
@@ -205,35 +205,35 @@ def test_default_ingest_workflow(
     assert 1 == PerformanceFramework.objects.all().count()
     framework = PerformanceFramework.objects.first()
     assert FRAMEWORK_NAME == framework.name
-    for suite in perf_datum['suites']:
+    for suite in perf_datum["suites"]:
         # verify summary, then subtests
         _verify_signature(
             test_repository.name,
-            perf_datum['framework']['name'],
-            suite['name'],
-            '',
-            'my_option_hash',
-            'my_platform',
-            suite.get('lowerIsBetter', True),
-            suite.get('extraOptions'),
-            suite.get('unit'),
+            perf_datum["framework"]["name"],
+            suite["name"],
+            "",
+            "my_option_hash",
+            "my_platform",
+            suite.get("lowerIsBetter", True),
+            suite.get("extraOptions"),
+            suite.get("unit"),
             perf_push.time,
         )
-        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
-        for subtest in suite['subtests']:
+        _verify_datum(suite["name"], "", suite["value"], perf_push.time)
+        for subtest in suite["subtests"]:
             _verify_signature(
                 test_repository.name,
-                perf_datum['framework']['name'],
-                suite['name'],
-                subtest['name'],
-                'my_option_hash',
-                'my_platform',
-                subtest.get('lowerIsBetter', True),
-                suite.get('extraOptions'),
-                suite.get('unit'),
+                perf_datum["framework"]["name"],
+                suite["name"],
+                subtest["name"],
+                "my_option_hash",
+                "my_platform",
+                subtest.get("lowerIsBetter", True),
+                suite.get("extraOptions"),
+                suite.get("unit"),
                 perf_push.time,
             )
-            _verify_datum(suite['name'], subtest['name'], subtest['value'], perf_push.time)
+            _verify_datum(suite["name"], subtest["name"], subtest["value"], perf_push.time)
 
 
 def test_hash_remains_unchanged_for_default_ingestion_workflow(
 
@@ -253,11 +253,11 @@ def test_timestamp_can_be_updated_for_default_ingestion_workflow(
 
     # send another datum, a little later, verify that signature is changed accordingly
     later_job = create_generic_job(
-        'lateguid', test_repository, later_perf_push.id, generic_reference_data
+        "lateguid", test_repository, later_perf_push.id, generic_reference_data
     )
     store_performance_artifact(later_job, submit_datum)
 
-    signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
+    signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
     assert signature.last_updated == later_perf_push.time
 
 
@@ -274,19 +274,19 @@ def test_measurement_unit_can_be_updated(
 
     _, updated_submit_datum = _prepare_test_data(sample_perf_artifact_with_new_unit)
     later_job = create_generic_job(
-        'lateguid', test_repository, later_perf_push.id, generic_reference_data
+        "lateguid", test_repository, later_perf_push.id, generic_reference_data
     )
     store_performance_artifact(later_job, updated_submit_datum)
 
-    summary_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='')
-    updated_subtest_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
+    summary_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="")
+    updated_subtest_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
     assert summary_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
     assert updated_subtest_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
 
     # no side effects when parent/sibling signatures
     # change measurement units
     not_changed_subtest_signature = PerformanceSignature.objects.get(
-        suite='youtube-watch', test='loadtime'
+        suite="youtube-watch", test="loadtime"
     )
     assert not_changed_subtest_signature.measurement_unit == MEASUREMENT_UNIT
 
 
@@ -295,9 +295,9 @@ def test_changing_extra_options_decouples_perf_signatures(
     test_repository, later_perf_push, perf_job, generic_reference_data, sample_perf_artifact
 ):
     updated_perf_artifact = copy.deepcopy(sample_perf_artifact)
-    updated_perf_artifact['blob']['suites'][0]['extraOptions'] = ['different-extra-options']
+    updated_perf_artifact["blob"]["suites"][0]["extraOptions"] = ["different-extra-options"]
     later_job = create_generic_job(
-        'lateguid', test_repository, later_perf_push.id, generic_reference_data
+        "lateguid", test_repository, later_perf_push.id, generic_reference_data
     )
     _, submit_datum = _prepare_test_data(sample_perf_artifact)
     _, updated_submit_datum = _prepare_test_data(updated_perf_artifact)
 
@@ -312,7 +312,7 @@ def test_changing_extra_options_decouples_perf_signatures(
 
 
 # Multi perf data (for the same job) ingestion workflow
-@pytest.mark.parametrize('PERFHERDER_ENABLE_MULTIDATA_INGESTION', [True, False])
+@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
 def test_multi_data_can_be_ingested_for_same_job_and_push(
     PERFHERDER_ENABLE_MULTIDATA_INGESTION,
     test_repository,
 
@@ -331,7 +331,7 @@ def test_multi_data_can_be_ingested_for_same_job_and_push(
 
 
 @pytest.mark.parametrize(
-    'PERFHERDER_ENABLE_MULTIDATA_INGESTION, based_on_multidata_toggle',
+    "PERFHERDER_ENABLE_MULTIDATA_INGESTION, based_on_multidata_toggle",
     [(True, operator.truth), (False, operator.not_)],
 )
 def test_multi_data_ingest_workflow(
 
@@ -376,8 +376,8 @@ def test_multi_data_ingest_workflow(
 
     # and their essential properties were correctly stored (or not)
     for artifact in sibling_perf_artifacts:
-        artifact_blob = artifact['blob']
-        push_timestamp = datetime.datetime.fromtimestamp(artifact_blob['pushTimestamp'])
+        artifact_blob = artifact["blob"]
+        push_timestamp = datetime.datetime.fromtimestamp(artifact_blob["pushTimestamp"])
         common_properties = dict(  # to both suites & subtests
             repository=perf_job.repository,
             job=perf_job,
 
@@ -385,21 +385,21 @@ def test_multi_data_ingest_workflow(
             push_timestamp=push_timestamp,
         )
         # check suites
-        for suite in artifact_blob['suites']:
+        for suite in artifact_blob["suites"]:
             assert performance_datum_exists(
                 **common_properties,
-                value=suite['value'],
+                value=suite["value"],
             )
 
             # and subtests
-            for subtest in suite['subtests']:
+            for subtest in suite["subtests"]:
                 assert performance_datum_exists(
                     **common_properties,
-                    value=subtest['value'],
+                    value=subtest["value"],
                 )
 
 
-@pytest.mark.parametrize('PERFHERDER_ENABLE_MULTIDATA_INGESTION', [True, False])
+@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
 def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
     PERFHERDER_ENABLE_MULTIDATA_INGESTION,
     test_repository,
 
@@ -417,7 +417,7 @@ def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
 
 
 @pytest.mark.parametrize(
-    'PERFHERDER_ENABLE_MULTIDATA_INGESTION, operator_', [(True, operator.eq), (False, operator.ne)]
|
||||
"PERFHERDER_ENABLE_MULTIDATA_INGESTION, operator_", [(True, operator.eq), (False, operator.ne)]
|
||||
)
|
||||
def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
|
||||
PERFHERDER_ENABLE_MULTIDATA_INGESTION,
|
||||
|
@ -435,9 +435,9 @@ def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
|
|||
_, submit_datum = _prepare_test_data(artifact)
|
||||
store_performance_artifact(perf_job, submit_datum)
|
||||
|
||||
signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
|
||||
signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
|
||||
last_artifact = sibling_perf_artifacts[-1]
|
||||
last_push_timestamp = datetime.datetime.fromtimestamp(last_artifact['blob']['pushTimestamp'])
|
||||
last_push_timestamp = datetime.datetime.fromtimestamp(last_artifact["blob"]["pushTimestamp"])
|
||||
|
||||
assert operator_(signature.last_updated, last_push_timestamp)
|
||||
|
||||
|
@ -452,8 +452,8 @@ def test_multi_commit_data_is_removed_by_dedicated_management_script(
|
|||
settings,
|
||||
):
|
||||
settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = True
|
||||
sibling_perf_artifacts[0]['blob'].pop(
|
||||
'pushTimestamp'
|
||||
sibling_perf_artifacts[0]["blob"].pop(
|
||||
"pushTimestamp"
|
||||
) # assume 1st PERFORMANCE_DATA is ingested in the old way
|
||||
|
||||
# ingest all perf_data
|
||||
|
@ -469,7 +469,7 @@ def test_multi_commit_data_is_removed_by_dedicated_management_script(
|
|||
== (len(sibling_perf_artifacts) - 1) * DATA_PER_ARTIFACT
|
||||
)
|
||||
|
||||
call_command('remove_multi_commit_data')
|
||||
call_command("remove_multi_commit_data")
|
||||
assert MultiCommitDatum.objects.all().count() == 0
|
||||
assert (
|
||||
PerformanceDatum.objects.all().count() == DATA_PER_ARTIFACT
|
||||
|
|
|
@@ -5,41 +5,41 @@ import pytest

@pytest.mark.parametrize(
-('suite_value', 'test_value', 'expected_fail'),
+("suite_value", "test_value", "expected_fail"),
[
({}, {}, True),
-({'value': 1234}, {}, True),
-({}, {'value': 1234}, False),
-({'value': 1234}, {'value': 1234}, False),
-({'value': float('inf')}, {}, True),
-({}, {'value': float('inf')}, True),
+({"value": 1234}, {}, True),
+({}, {"value": 1234}, False),
+({"value": 1234}, {"value": 1234}, False),
+({"value": float("inf")}, {}, True),
+({}, {"value": float("inf")}, True),
(
{
-'value': 1234,
-'extraOptions': [
+"value": 1234,
+"extraOptions": [
# has >45 characters
[
-'android-api-53211-with-google-play-services-and-some-random-other-extra-information'
+"android-api-53211-with-google-play-services-and-some-random-other-extra-information"
]
],
},
-{'value': 1234},
+{"value": 1234},
True,
),
(
-{'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8', '9']},
-{'value': 1234},
+{"value": 1234, "extraOptions": ["1", "2", "3", "4", "5", "6", "7", "8", "9"]},
+{"value": 1234},
True,
),
(
-{'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8']},
-{'value': 1234},
+{"value": 1234, "extraOptions": ["1", "2", "3", "4", "5", "6", "7", "8"]},
+{"value": 1234},
False,
),
],
)
def test_perf_schema(suite_value, test_value, expected_fail):
-with open('schemas/performance-artifact.json') as f:
+with open("schemas/performance-artifact.json") as f:
perf_schema = json.load(f)

datum = {

@@ -51,8 +51,8 @@ def test_perf_schema(suite_value, test_value, expected_fail):
}
],
}
-datum['suites'][0].update(suite_value)
-datum['suites'][0]['subtests'][0].update(test_value)
+datum["suites"][0].update(suite_value)
+datum["suites"][0]["subtests"][0].update(test_value)
print(datum)
if expected_fail:
with pytest.raises(jsonschema.ValidationError):

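Side note (editor's sketch, not part of this commit): the validation flow exercised above can be reproduced standalone with jsonschema. Only the schema path comes from the tests; the payload below is illustrative.

import json
import jsonschema

with open("schemas/performance-artifact.json") as f:
    perf_schema = json.load(f)

# Illustrative payload; values are made up, the shape follows the tests above.
datum = {
    "framework": {"name": "talos"},
    "suites": [{"name": "a11yr", "subtests": [{"name": "t1", "value": 1234}]}],
}
jsonschema.validate(datum, perf_schema)  # raises jsonschema.ValidationError on failure
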
@@ -56,7 +56,7 @@ def mock_github_pr_commits(activate_responses):
"https://api.github.com/repos/mozilla/test_treeherder/pulls/1692/commits",
body=mocked_content,
status=200,
-content_type='application/json',
+content_type="application/json",
)

@@ -74,7 +74,7 @@ def mock_github_push_compare(activate_responses):
"5fdb785b28b356f50fc1d9cb180d401bb03fc1f1",
json=mocked_content[0],
status=200,
-content_type='application/json',
+content_type="application/json",
)
responses.add(
responses.GET,

@@ -83,7 +83,7 @@ def mock_github_push_compare(activate_responses):
"ad9bfc2a62b70b9f3dbb1c3a5969f30bacce3d74",
json=mocked_content[1],
status=200,
-content_type='application/json',
+content_type="application/json",
)

@@ -98,7 +98,7 @@ def mock_hg_push_commits(activate_responses):
"https://hg.mozilla.org/try/json-pushes",
body=mocked_content,
status=200,
-content_type='application/json',
+content_type="application/json",
)

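Side note (editor's sketch, not part of this commit): the fixtures above all follow the same `responses` pattern, registering a canned reply so that any requests call to that URL during the test is answered from the mock instead of the network. A minimal standalone version, with an illustrative body:

import requests
import responses

@responses.activate
def fetch_pushes():
    responses.add(
        responses.GET,
        "https://hg.mozilla.org/try/json-pushes",  # URL taken from the fixture above
        body='{"pushes": {}}',  # illustrative body
        status=200,
        content_type="application/json",
    )
    # Served from the mock registered above, not the real server.
    return requests.get("https://hg.mozilla.org/try/json-pushes").json()
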
@@ -11,7 +11,7 @@ from treeherder.model.models import Commit, Push
def test_ingest_hg_pushlog(test_repository, test_base_dir, activate_responses):
"""ingesting a number of pushes should populate push and revisions"""

-pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_content = f.read()
pushlog_fake_url = "http://www.thisismypushlog.com"

@@ -20,7 +20,7 @@ def test_ingest_hg_pushlog(test_repository, test_base_dir, activate_responses):
pushlog_fake_url,
body=pushlog_content,
status=200,
-content_type='application/json',
+content_type="application/json",
)

process = HgPushlogProcess()

@@ -37,10 +37,10 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
all the pushes in the request,
e.g. trying to store [A,B] with A already stored, B will be stored"""

-pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_json = json.load(f)
-pushes = list(pushlog_json['pushes'].values())
+pushes = list(pushlog_json["pushes"].values())
first_push, second_push = pushes[0:2]

pushlog_fake_url = "http://www.thisismypushlog.com/?full=1&version=2"

@@ -52,7 +52,7 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
pushlog_fake_url,
body=first_push_json,
status=200,
-content_type='application/json',
+content_type="application/json",
)

process = HgPushlogProcess()

@@ -70,7 +70,7 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
pushlog_fake_url + "&startID=1",
body=first_and_second_push_json,
status=200,
-content_type='application/json',
+content_type="application/json",
)

process = HgPushlogProcess()

@@ -85,7 +85,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
ingesting a number of pushes should cache the top revision of the last push
"""

-pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_content = f.read()
pushlog_fake_url = "http://www.thisismypushlog.com"

@@ -94,14 +94,14 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
pushlog_fake_url,
body=pushlog_content,
status=200,
-content_type='application/json',
+content_type="application/json",
)

process = HgPushlogProcess()
process.run(pushlog_fake_url, test_repository.name)

pushlog_dict = json.loads(pushlog_content)
-pushes = pushlog_dict['pushes']
+pushes = pushlog_dict["pushes"]
max_push_id = max(int(k) for k in pushes.keys())

cache_key = "{}:last_push_id".format(test_repository.name)

@@ -123,7 +123,7 @@ def test_empty_json_pushes(test_repository, test_base_dir, activate_responses):
pushlog_fake_url,
body=empty_push_json,
status=200,
-content_type='application/json',
+content_type="application/json",
)

process = HgPushlogProcess()

@@ -6,30 +6,30 @@ from treeherder.etl.runnable_jobs import (
_taskcluster_runnable_jobs,
)

-TASK_ID = 'AFq3FRt4TyiTwIN7fUqOQg'
-CONTENT1 = {'taskId': TASK_ID}
+TASK_ID = "AFq3FRt4TyiTwIN7fUqOQg"
+CONTENT1 = {"taskId": TASK_ID}
RUNNABLE_JOBS_URL = RUNNABLE_JOBS_URL.format(task_id=TASK_ID, run_number=0)
-JOB_NAME = 'job name'
+JOB_NAME = "job name"
API_RETURN = {
-'build_platform': 'plaform name',
-'build_system_type': 'taskcluster',
-'job_group_name': 'Group Name',
-'job_group_symbol': 'GRP',
-'job_type_name': JOB_NAME,
-'job_type_symbol': 'sym',
-'platform': 'plaform name',
-'platform_option': 'opt',
-'ref_data_name': JOB_NAME,
-'state': 'runnable',
-'result': 'runnable',
+"build_platform": "plaform name",
+"build_system_type": "taskcluster",
+"job_group_name": "Group Name",
+"job_group_symbol": "GRP",
+"job_type_name": JOB_NAME,
+"job_type_symbol": "sym",
+"platform": "plaform name",
+"platform_option": "opt",
+"ref_data_name": JOB_NAME,
+"state": "runnable",
+"result": "runnable",
}
RUNNABLE_JOBS_CONTENTS = {
JOB_NAME: {
-'collection': {'opt': True},
-'groupName': API_RETURN['job_group_name'],
-'groupSymbol': API_RETURN['job_group_symbol'],
-'platform': API_RETURN['platform'],
-'symbol': API_RETURN['job_type_symbol'],
+"collection": {"opt": True},
+"groupName": API_RETURN["job_group_name"],
+"groupSymbol": API_RETURN["job_group_symbol"],
+"platform": API_RETURN["platform"],
+"symbol": API_RETURN["job_type_symbol"],
}
}

@@ -4,9 +4,9 @@ from treeherder.etl.text import astral_filter, filter_re

def test_filter_re_matching():
points = [
-u"\U00010045",
-u"\U00010053",
-u"\U00010054",
+"\U00010045",
+"\U00010053",
+"\U00010054",
]
for point in points:
assert bool(filter_re.match(point)) is True

@@ -14,21 +14,21 @@ def test_filter_re_matching():

def test_filter_not_matching():
points = [
-u"\U00000045",
-u"\U00000053",
-u"\U00000054",
+"\U00000045",
+"\U00000053",
+"\U00000054",
]
for point in points:
assert bool(filter_re.match(point)) is False


def test_astra_filter_emoji():
-output = astral_filter(u'🍆')
-expected = '<U+01F346>'
+output = astral_filter("🍆")
+expected = "<U+01F346>"
assert output == expected


def test_astra_filter_hex_value():
"""check the expected outcome is also not changed"""
-hex_values = '\U00000048\U00000049'
+hex_values = "\U00000048\U00000049"
assert hex_values == astral_filter(hex_values)

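Side note (editor's sketch, not part of this commit): the behaviour these tests pin down, astral-plane code points replaced by a <U+XXXXXX> marker, can be re-derived as follows. This is an illustration, not the treeherder implementation.

import re

astral_re = re.compile("([\U00010000-\U0010FFFF])")  # code points beyond the BMP

def astral_filter_sketch(text):
    # Replace each astral code point with its <U+XXXXXX> text representation.
    return astral_re.sub(lambda m: "<U+{:06X}>".format(ord(m.group(1))), text)

assert astral_filter_sketch("🍆") == "<U+01F346>"  # matches the expected value above
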
@@ -6,39 +6,39 @@ from treeherder.intermittents_commenter.commenter import Commenter

@responses.activate
def test_intermittents_commenter(bug_data):
-startday = '2012-05-09'
-endday = '2018-05-10'
+startday = "2012-05-09"
+endday = "2018-05-10"
alt_startday = startday
alt_endday = endday

process = Commenter(weekly_mode=True, dry_run=True)
-params = {'include_fields': 'product%2C+component%2C+priority%2C+whiteboard%2C+id'}
-url = '{}/rest/bug?id={}&include_fields={}'.format(
-settings.BZ_API_URL, bug_data['bug_id'], params['include_fields']
+params = {"include_fields": "product%2C+component%2C+priority%2C+whiteboard%2C+id"}
+url = "{}/rest/bug?id={}&include_fields={}".format(
+settings.BZ_API_URL, bug_data["bug_id"], params["include_fields"]
)

content = {
"bugs": [
{
-u"component": u"General",
-u"priority": u"P3",
-u"product": u"Testing",
-u"whiteboard": u"[stockwell infra] [see summary at comment 92]",
-u"id": bug_data['bug_id'],
+"component": "General",
+"priority": "P3",
+"product": "Testing",
+"whiteboard": "[stockwell infra] [see summary at comment 92]",
+"id": bug_data["bug_id"],
}
],
"faults": [],
}

-responses.add(responses.Response(method='GET', url=url, json=content, status=200))
+responses.add(responses.Response(method="GET", url=url, json=content, status=200))

-resp = process.fetch_bug_details(bug_data['bug_id'])
-assert resp == content['bugs']
+resp = process.fetch_bug_details(bug_data["bug_id"])
+assert resp == content["bugs"]

comment_params = process.generate_bug_changes(startday, endday, alt_startday, alt_endday)

-with open('tests/intermittents_commenter/expected_comment.text', 'r') as comment:
+with open("tests/intermittents_commenter/expected_comment.text", "r") as comment:
expected_comment = comment.read()
print(len(expected_comment))
-print(len(comment_params[0]['changes']['comment']['body']))
-assert comment_params[0]['changes']['comment']['body'] == expected_comment
+print(len(comment_params[0]["changes"]["comment"]["body"]))
+assert comment_params[0]["changes"]["comment"]["body"] == expected_comment

@@ -57,14 +57,14 @@ def test_all_builders_complete():
@responses.activate
def test_log_download_size_limit():
"""Test that logs whose Content-Length exceed the size limit are not parsed."""
-url = 'http://foo.tld/fake_large_log.tar.gz'
+url = "http://foo.tld/fake_large_log.tar.gz"
responses.add(
responses.GET,
url,
-body='',
+body="",
adding_headers={
-'Content-Encoding': 'gzip',
-'Content-Length': str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
+"Content-Encoding": "gzip",
+"Content-Length": str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
},
)
lpc = ArtifactBuilderCollection(url)

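Side note (editor's sketch, not part of this commit): the guard this test exercises amounts to checking the advertised Content-Length before downloading a log. The constant name comes from the test; the check below is a plausible re-derivation, not the parser's actual code, and the limit value is assumed.

import requests

MAX_DOWNLOAD_SIZE_IN_BYTES = 5 * 1024 * 1024  # assumed value, for illustration only

def is_small_enough(url):
    # HEAD request: inspect the advertised size without fetching the body.
    content_length = requests.head(url).headers.get("Content-Length", "0")
    return int(content_length) <= MAX_DOWNLOAD_SIZE_IN_BYTES
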
@@ -120,7 +120,7 @@ def test_error_lines_matched(line):
def test_error_lines_taskcluster(line):
parser = ErrorParser()
# Make the log parser think this is a TaskCluster log.
-parser.parse_line('[taskcluster foo] this is a taskcluster log', 1)
+parser.parse_line("[taskcluster foo] this is a taskcluster log", 1)
assert parser.is_taskcluster
parser.parse_line(line, 2)
assert len(parser.artifact) == 1

@@ -155,4 +155,4 @@ def test_taskcluster_strip_prefix():
# TC prefix is stripped.
parser.parse_line("[vcs 2016-09-07T19:03:02.188327Z] 23:57:52 ERROR - Return code: 1", 3)
assert len(parser.artifact) == 1
-assert parser.artifact[0]['linenumber'] == 3
+assert parser.artifact[0]["linenumber"] == 3

@@ -15,9 +15,9 @@ def test_performance_log_parsing():

# first two have only one artifact, second has two artifacts
for logfile, num_perf_artifacts in [
-('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
-('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
-('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2),
+("mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz", 1),
+("try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz", 1),
+("mozilla-inbound-linux64-bm72-build1-build225.txt.gz", 2),
]:
url = add_log_response(logfile)

@@ -25,6 +25,6 @@ def test_performance_log_parsing():
lpc = ArtifactBuilderCollection(url, builders=[builder])
lpc.parse()
act = lpc.artifacts[builder.name]
-assert len(act['performance_data']) == num_perf_artifacts
-for perfherder_artifact in act['performance_data']:
+assert len(act["performance_data"]) == num_perf_artifacts
+for perfherder_artifact in act["performance_data"]:
validate(perfherder_artifact, PERFHERDER_SCHEMA)

@@ -27,6 +27,6 @@ def test_performance_log_parsing_malformed_perfherder_data():
}
],
}
-parser.parse_line('PERFHERDER_DATA: {}'.format(json.dumps(valid_perfherder_data)), 3)
+parser.parse_line("PERFHERDER_DATA: {}".format(json.dumps(valid_perfherder_data)), 3)

assert parser.get_artifact() == [valid_perfherder_data]

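Side note (editor's sketch, not part of this commit): a PERFHERDER_DATA line like the one fed to the parser above is just a marker followed by JSON, so its payload can be recovered with plain json.loads. The helper below is illustrative, not the parser's implementation.

import json

def extract_perfherder_data(line):
    # Return the decoded payload, or None when the marker is absent.
    marker = "PERFHERDER_DATA: "
    start = line.find(marker)
    if start == -1:
        return None
    return json.loads(line[start + len(marker):])

payload = extract_perfherder_data('INFO - PERFHERDER_DATA: {"framework": {}, "suites": []}')
assert payload == {"framework": {}, "suites": []}
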
@@ -17,7 +17,7 @@ from ..sampledata import SampleData

def test_store_error_summary(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -37,7 +37,7 @@ def test_store_error_summary(activate_responses, test_repository, test_job):

def test_store_error_summary_default_group(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
resp_body = json.load(log_handler)

@@ -54,9 +54,9 @@ def test_store_error_summary_default_group(activate_responses, test_repository,

def test_store_error_summary_truncated(activate_responses, test_repository, test_job, monkeypatch):
log_path = SampleData().get_log_path("plain-chunked_errorsummary_10_lines.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

-monkeypatch.setattr(settings, 'FAILURE_LINES_CUTOFF', 5)
+monkeypatch.setattr(settings, "FAILURE_LINES_CUTOFF", 5)

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -67,7 +67,7 @@ def test_store_error_summary_truncated(activate_responses, test_repository, test

assert FailureLine.objects.count() == 5 + 1

-failure = FailureLine.objects.get(action='truncated')
+failure = FailureLine.objects.get(action="truncated")

assert failure.job_guid == test_job.guid

@@ -76,9 +76,9 @@ def test_store_error_summary_truncated(activate_responses, test_repository, test

def test_store_error_summary_astral(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary_astral.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

-with open(log_path, encoding='utf8') as log_handler:
+with open(log_path, encoding="utf8") as log_handler:
responses.add(
responses.GET,
log_url,

@@ -100,7 +100,7 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo
assert failure.repository == test_repository

# Specific unicode chars cannot be inserted as MySQL pseudo-UTF8 and are replaced by a plain text representation
-if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
assert (
failure.test
== "toolkit/content/tests/widgets/test_videocontrols_video_direction.html <U+01F346>"

@@ -122,7 +122,7 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo

def test_store_error_summary_404(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=404)

@@ -137,7 +137,7 @@ def test_store_error_summary_404(activate_responses, test_repository, test_job):

def test_store_error_summary_500(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=500)

@@ -152,7 +152,7 @@ def test_store_error_summary_500(activate_responses, test_repository, test_job):

def test_store_error_summary_duplicate(activate_responses, test_repository, test_job):
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"
log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

write_failure_lines(

@@ -171,7 +171,7 @@ def test_store_error_summary_duplicate(activate_responses, test_repository, test

def test_store_error_summary_group_status(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -195,7 +195,7 @@ def test_store_error_summary_group_status(activate_responses, test_repository, t

def test_group_status_duration(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -215,7 +215,7 @@ def test_group_status_duration(activate_responses, test_repository, test_job):

def test_get_group_results(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -224,14 +224,14 @@ def test_get_group_results(activate_responses, test_repository, test_job):
store_failure_lines(log_obj)

groups = get_group_results(test_job.push)
-task_groups = groups['V3SVuxO8TFy37En_6HcXLs']
+task_groups = groups["V3SVuxO8TFy37En_6HcXLs"]

-assert task_groups['dom/base/test/browser.ini']
+assert task_groups["dom/base/test/browser.ini"]


def test_get_group_results_with_colon(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("xpcshell-errorsummary-with-colon.log")
-log_url = 'http://my-log.mozilla.org'
+log_url = "http://my-log.mozilla.org"

with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

@@ -240,12 +240,12 @@ def test_get_group_results_with_colon(activate_responses, test_repository, test_
store_failure_lines(log_obj)

groups = get_group_results(test_job.push)
-task_groups = groups['V3SVuxO8TFy37En_6HcXLs']
+task_groups = groups["V3SVuxO8TFy37En_6HcXLs"]

assert task_groups[
-'toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-content.ini'
+"toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-content.ini"
]
-assert task_groups['toolkit/components/places/tests/unit/xpcshell.ini']
+assert task_groups["toolkit/components/places/tests/unit/xpcshell.ini"]
assert task_groups[
-'toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-common-e10s.ini'
+"toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-common-e10s.ini"
]

@@ -19,7 +19,7 @@ def jobs_with_local_log(activate_responses):
job = sample_data.job_data[0]

# substitute the log url with a local url
-job['job']['log_references'][0]['url'] = url
+job["job"]["log_references"][0]["url"] = url
return [job]

@@ -35,8 +35,8 @@ def test_create_error_summary(

jobs = jobs_with_local_log
for job in jobs:
-job['job']['result'] = "testfailed"
-job['revision'] = sample_push[0]['revision']
+job["job"]["result"] = "testfailed"
+job["revision"] = sample_push[0]["revision"]

store_job_data(test_repository, jobs)

@@ -14,58 +14,58 @@ from treeherder.log_parser.utils import (
)

LENGTH_OK = {
-'framework': {},
-'suites': [
+"framework": {},
+"suites": [
{
-'extraOptions': [
-'.' * 45,
-'.' * 100,
+"extraOptions": [
+"." * 45,
+"." * 100,
],
-'name': 'testing',
-'subtests': [],
+"name": "testing",
+"subtests": [],
}
]
* 3,
}

LONGER_THAN_ALL_MAX = {
-'framework': {},
-'suites': [
+"framework": {},
+"suites": [
{
-'extraOptions': [
-'.' * 46,
-'.' * 101,
+"extraOptions": [
+"." * 46,
+"." * 101,
],
-'name': 'testing',
-'subtests': [],
+"name": "testing",
+"subtests": [],
}
],
}

LONGER_THAN_BIGGER_MAX = {
-'framework': {},
-'suites': [
+"framework": {},
+"suites": [
{
-'extraOptions': [
-'.' * 45,
-'.' * 101,
+"extraOptions": [
+"." * 45,
+"." * 101,
],
-'name': 'testing',
-'subtests': [],
+"name": "testing",
+"subtests": [],
}
],
}

LONGER_THAN_SMALLER_MAX = {
-'framework': {},
-'suites': [
+"framework": {},
+"suites": [
{
-'extraOptions': [
-'.' * 46,
-'.' * 100,
+"extraOptions": [
+"." * 46,
+"." * 100,
],
-'name': 'testing',
-'subtests': [],
+"name": "testing",
+"subtests": [],
}
]
* 3,

@@ -77,7 +77,7 @@ def test_smaller_than_bigger():

def test_extra_option_max_length():
-with open(os.path.join('schemas', 'performance-artifact.json')) as f:
+with open(os.path.join("schemas", "performance-artifact.json")) as f:
PERFHERDER_SCHEMA = json.load(f)
assert 100 == _lookup_extra_options_max(PERFHERDER_SCHEMA)

@@ -90,7 +90,7 @@ def test_validate_perf_schema_no_exception():

@pytest.mark.parametrize(
-'data', (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX)
+"data", (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX)
)
def test_validate_perf_schema(data):
for datum in data:

@@ -36,11 +36,11 @@ def empty_backfill_report(test_perf_alert_summary) -> BackfillReport:

@pytest.mark.parametrize(
-'repository_name',
+"repository_name",
[
-'autoland',
-'mozilla-beta',
-'mozilla-central',
+"autoland",
+"mozilla-beta",
+"mozilla-central",
],
)
def test_cycle_performance_data(

@@ -57,13 +57,13 @@ def test_cycle_performance_data(
expired_timestamp = datetime.now() - timedelta(days=400)

test_perf_signature_2 = PerformanceSignature.objects.create(
-signature_hash='b' * 40,
+signature_hash="b" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
-test='test 2',
+test="test 2",
last_updated=expired_timestamp,
has_subtests=False,
)

@@ -100,12 +100,12 @@ def test_cycle_performance_data(

command = filter(
lambda arg: arg is not None,
-['cycle_data', 'from:perfherder'],
+["cycle_data", "from:perfherder"],
)
call_command(*list(command)) # test repository isn't a main one

-assert list(PerformanceDatum.objects.values_list('id', flat=True)) == [1]
-assert list(PerformanceSignature.objects.values_list('id', flat=True)) == [
+assert list(PerformanceDatum.objects.values_list("id", flat=True)) == [1]
+assert list(PerformanceSignature.objects.values_list("id", flat=True)) == [
test_perf_signature.id
]

@@ -115,36 +115,36 @@ def test_performance_signatures_are_deleted(test_perf_signature, taskcluster_not
expired_timestamp = cycler.max_timestamp

perf_signature_to_delete = PerformanceSignature.objects.create(
-signature_hash='b' * 40,
+signature_hash="b" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
-test='test_perf_signature_to_delete',
+test="test_perf_signature_to_delete",
last_updated=expired_timestamp,
has_subtests=False,
)

perf_signature_to_keep = PerformanceSignature.objects.create(
-signature_hash='h' * 40,
+signature_hash="h" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
-test='test_perf_signature_to_keep',
+test="test_perf_signature_to_keep",
last_updated=datetime.now(),
has_subtests=False,
)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert perf_signature_to_keep.id in list(
-PerformanceSignature.objects.values_list('id', flat=True)
+PerformanceSignature.objects.values_list("id", flat=True)
)
assert perf_signature_to_delete.id not in list(
-PerformanceSignature.objects.values_list('id', flat=True)
+PerformanceSignature.objects.values_list("id", flat=True)
)

@@ -160,7 +160,7 @@ def test_try_data_removal(
test_perf_signature.repository = try_repository
test_perf_signature.save()

-try_pushes = list(Push.objects.filter(repository=try_repository).order_by('id').all())
+try_pushes = list(Push.objects.filter(repository=try_repository).order_by("id").all())

for idx, push in enumerate(try_pushes[:-2]):
push_timestamp = datetime.now()

@@ -191,7 +191,7 @@ def test_try_data_removal(

total_initial_data = PerformanceDatum.objects.count()

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceDatum.objects.count() == total_initial_data - total_removals
assert not PerformanceDatum.objects.filter(
push_timestamp__lt=datetime.now() - timedelta(weeks=6),

@@ -203,8 +203,8 @@ def test_try_data_removal(

@pytest.mark.parametrize(
-'repository_name',
-['autoland', 'mozilla-beta', 'fenix', 'reference-browser'],
+"repository_name",
+["autoland", "mozilla-beta", "fenix", "reference-browser"],
)
def test_irrelevant_repos_data_removal(
test_repository,

@@ -262,7 +262,7 @@ def test_irrelevant_repos_data_removal(

total_initial_data = PerformanceDatum.objects.count()

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceDatum.objects.count() == total_initial_data - 1
assert PerformanceDatum.objects.filter(repository=relevant_repository).exists()
assert not PerformanceDatum.objects.filter(

@@ -285,14 +285,14 @@ def test_signature_remover(

assert len(PerformanceSignature.objects.all()) == 2

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert taskcluster_notify_mock.email.call_count == 1
assert len(PerformanceSignature.objects.all()) == 1
assert PerformanceSignature.objects.first() == test_perf_signature


-@pytest.mark.parametrize('total_signatures', [3, 4, 8, 10])
+@pytest.mark.parametrize("total_signatures", [3, 4, 8, 10])
def test_total_emails_sent(
test_perf_signature, try_repository, total_signatures, mock_tc_prod_notify_credentials
):

@@ -311,13 +311,13 @@ def test_total_emails_sent(
for n in range(0, total_signatures):
PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
-signature_hash=(20 * ('t%s' % n)),
+signature_hash=(20 * ("t%s" % n)),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
-suite='mysuite%s' % n,
-test='mytest%s' % n,
-application='firefox',
+suite="mysuite%s" % n,
+test="mytest%s" % n,
+application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),

@@ -326,13 +326,13 @@ def test_total_emails_sent(
for n in range(0, 10):
PerformanceSignature.objects.create(
repository=try_repository,
-signature_hash=(20 * ('e%s' % n)),
+signature_hash=(20 * ("e%s" % n)),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
-suite='mysuite%s' % n,
-test='mytest%s' % n,
-application='firefox',
+suite="mysuite%s" % n,
+test="mytest%s" % n,
+application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),

@@ -348,7 +348,7 @@ def test_total_emails_sent(
signatures_remover.remove_in_chunks(signatures)

assert notify_client_mock.email.call_count == expected_call_count
-assert not PerformanceSignature.objects.filter(repository__name='try').exists()
+assert not PerformanceSignature.objects.filter(repository__name="try").exists()


def test_remove_try_signatures_without_data(

@@ -367,13 +367,13 @@ def test_remove_try_signatures_without_data(
)
signature_with_perf_data = PerformanceSignature.objects.create(
repository=try_repository,
-signature_hash=(20 * 'e1'),
+signature_hash=(20 * "e1"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
-suite='mysuite',
-test='mytest',
-application='firefox',
+suite="mysuite",
+test="mytest",
+application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),

@@ -414,7 +414,7 @@ def test_performance_cycler_quit_indicator(taskcluster_notify_mock):
max_runtime.started_at = two_seconds_ago
max_runtime.quit_on_timeout()
except MaxRuntimeExceeded:
-pytest.fail('Performance cycling shouldn\'t have timed out')
+pytest.fail("Performance cycling shouldn't have timed out")


@pytest.fixture

@@ -432,7 +432,7 @@ def empty_alert_summary(

@pytest.mark.parametrize(
-'expired_time',
+"expired_time",
[
datetime.now() - timedelta(days=365),
datetime.now() - timedelta(days=181),

@@ -449,12 +449,12 @@ def test_summary_without_any_kind_of_alerts_is_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 0

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert not PerformanceAlertSummary.objects.exists()


@pytest.mark.parametrize(
-'recently',
+"recently",
[
datetime.now(),
datetime.now() - timedelta(minutes=30),

@@ -472,12 +472,12 @@ def test_summary_without_any_kind_of_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 0

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.count() == 1


@pytest.mark.parametrize(
-'creation_time',
+"creation_time",
[
# expired
datetime.now() - timedelta(days=365),

@@ -515,7 +515,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 1
assert empty_alert_summary.related_alerts.count() == 0

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()

# with both

@@ -526,7 +526,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 1
assert empty_alert_summary.related_alerts.count() == 1

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()

# with related_alerts only

@@ -536,7 +536,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 1

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()

@@ -563,7 +563,7 @@ def test_stalled_data_removal(
last_updated__lt=max_timestamp
)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert test_perf_signature not in PerformanceSignature.objects.all()
assert test_perf_data not in PerformanceDatum.objects.all()

@@ -573,8 +573,8 @@ def test_stalled_data_removal(

@pytest.mark.parametrize(
-'nr_months, repository',
-[(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+"nr_months, repository",
+[(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_equal_distribution_for_historical_data(
test_repository,

@@ -610,7 +610,7 @@ def test_equal_distribution_for_historical_data(
)
perf_data.append(data)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()

@@ -619,8 +619,8 @@ def test_equal_distribution_for_historical_data(

@pytest.mark.parametrize(
-'nr_months, repository',
-[(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+"nr_months, repository",
+[(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_big_density_in_historical_data(
test_repository,

@@ -667,7 +667,7 @@ def test_big_density_in_historical_data(
)
perf_data.append(data)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()

@@ -676,8 +676,8 @@ def test_big_density_in_historical_data(

@pytest.mark.parametrize(
-'nr_months, repository',
-[(5, 'autoland'), (8, 'mozilla-central'), (11, 'mozilla-central')],
+"nr_months, repository",
+[(5, "autoland"), (8, "mozilla-central"), (11, "mozilla-central")],
)
def test_one_month_worth_of_data_points(
test_repository,

@@ -721,7 +721,7 @@ def test_one_month_worth_of_data_points(
)
perf_data.append(data)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

stalled_signature.refresh_from_db()
assert PerformanceSignature.objects.filter(id=stalled_signature.id).exists()

@@ -731,8 +731,8 @@ def test_one_month_worth_of_data_points(

@pytest.mark.parametrize(
-'nr_months, repository',
-[(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+"nr_months, repository",
+[(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_non_historical_stalled_data_is_removed(
test_repository,

@@ -768,7 +768,7 @@ def test_non_historical_stalled_data_is_removed(
)
perf_data.append(data)

-call_command('cycle_data', 'from:perfherder')
+call_command("cycle_data", "from:perfherder")

assert not PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()

@@ -783,8 +783,8 @@ def test_try_data_removal_errors_out_on_missing_try_data(try_repository):
_ = try_removal_strategy.target_signatures


-@patch('treeherder.config.settings.SITE_HOSTNAME', 'treeherder-production.com')
-@pytest.mark.parametrize('days', [5, 30, 100, 364])
+@patch("treeherder.config.settings.SITE_HOSTNAME", "treeherder-production.com")
+@pytest.mark.parametrize("days", [5, 30, 100, 364])
def test_explicit_days_validation_on_all_envs(days):
with pytest.raises(ValueError):
_ = PerfherderCycler(10_000, 0, days=days)

@@ -810,12 +810,12 @@ def test_deleting_performance_data_cascades_to_perf_multicomit_data(test_perf_da

try:
cursor = connection.cursor()
-if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
cursor.execute(
-'''
+"""
DELETE FROM `performance_datum`
WHERE id = %s
-''',
+""",
[perf_datum.id],
)
else:

@@ -837,10 +837,10 @@ def test_deleting_performance_data_cascades_to_perf_datum_replicate(test_perf_da
try:
cursor = connection.cursor()
cursor.execute(
-'''
+"""
DELETE FROM performance_datum
WHERE id = %s
-''',
+""",
[perf_datum.id],
)
except IntegrityError:

@@ -870,7 +870,7 @@ def test_empty_backfill_reports_get_removed(empty_backfill_report):
assert BackfillReport.objects.count() == 0


-@pytest.mark.parametrize('days_since_created', [0, 30, 100])
+@pytest.mark.parametrize("days_since_created", [0, 30, 100])
def test_empty_backfill_reports_arent_removed_if_not_enough_time_passed(
empty_backfill_report, days_since_created
):

@@ -11,13 +11,13 @@ from treeherder.perf.models import PerformanceDatum

@pytest.mark.parametrize(
-'days, expected_jobs, expected_failure_lines, expected_job_logs, cmd_args, cmd_kwargs',
+"days, expected_jobs, expected_failure_lines, expected_job_logs, cmd_args, cmd_kwargs",
[
-(7, 0, 0, 0, ('cycle_data', 'from:treeherder'), {'sleep_time': 0, 'days': 1}),
+(7, 0, 0, 0, ("cycle_data", "from:treeherder"), {"sleep_time": 0, "days": 1}),
# also check default '--days' param from treeherder
-(119, 20, 2, 22, ('cycle_data',), {'sleep_time': 0}),
-(120, 0, 0, 0, ('cycle_data',), {'sleep_time': 0}),
-(150, 0, 0, 0, ('cycle_data',), {'sleep_time': 0}),
+(119, 20, 2, 22, ("cycle_data",), {"sleep_time": 0}),
+(120, 0, 0, 0, ("cycle_data",), {"sleep_time": 0}),
+(150, 0, 0, 0, ("cycle_data",), {"sleep_time": 0}),
],
)
def test_cycle_all_data(

@@ -75,7 +75,7 @@ def test_cycle_all_but_one_job(
job_not_deleted.save()

extra_objects = {
-'failure_lines': (
+"failure_lines": (
FailureLine,
create_failure_lines(
job_not_deleted, [(test_line, {}), (test_line, {"subtest": "subtest2"})]

@@ -91,7 +91,7 @@ def test_cycle_all_but_one_job(
num_job_logs_to_be_deleted = JobLog.objects.all().exclude(job__id=job_not_deleted.id).count()
num_job_logs_before = JobLog.objects.count()

-call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, debug=True, chunk_size=1)
+call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, debug=True, chunk_size=1)

assert Job.objects.count() == 1
assert JobLog.objects.count() == (num_job_logs_before - num_job_logs_to_be_deleted)

@@ -119,7 +119,7 @@ def test_cycle_all_data_in_chunks(

create_failure_lines(Job.objects.get(id=1), [(test_line, {})] * 7)

-call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)

# There should be no jobs after cycling
assert Job.objects.count() == 0

@@ -133,17 +133,17 @@ def test_cycle_job_model_reference_data(
test_utils.do_job_ingestion(test_repository, job_data, sample_push, False)

# get a list of ids of original reference data
-original_job_type_ids = JobType.objects.values_list('id', flat=True)
-original_job_group_ids = JobGroup.objects.values_list('id', flat=True)
-original_machine_ids = Machine.objects.values_list('id', flat=True)
+original_job_type_ids = JobType.objects.values_list("id", flat=True)
+original_job_group_ids = JobGroup.objects.values_list("id", flat=True)
+original_machine_ids = Machine.objects.values_list("id", flat=True)

# create a bunch of job model data that should be cycled, since they don't
# reference any current jobs
-jg = JobGroup.objects.create(symbol='moo', name='moo')
-jt = JobType.objects.create(symbol='mu', name='mu')
-m = Machine.objects.create(name='machine_with_no_job')
+jg = JobGroup.objects.create(symbol="moo", name="moo")
+jt = JobType.objects.create(symbol="mu", name="mu")
+m = Machine.objects.create(name="machine_with_no_job")
(jg_id, jt_id, m_id) = (jg.id, jt.id, m.id)
-call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)

# assert that reference data that should have been cycled, was cycled
assert JobGroup.objects.filter(id=jg_id).count() == 0

@@ -186,7 +186,7 @@ def test_cycle_job_with_performance_data(
value=1.0,
)

-call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)

# assert that the job got cycled
assert Job.objects.count() == 0

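Side note (editor's sketch, not part of this commit): every test in this file drives the cleanup through Django's call_command, which takes the command name and positional arguments as strings and option values as keywords. A standalone invocation mirroring the ones above:

from django.core.management import call_command

# Positional args and option keywords mirror the cycle_data invocations above.
call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)
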
@@ -11,26 +11,26 @@ fifty_days_ago = datetime.now() - timedelta(days=50)

@pytest.fixture
def sample_bugs(test_base_dir):
-filename = os.path.join(test_base_dir, 'sample_data', 'bug_list.json')
+filename = os.path.join(test_base_dir, "sample_data", "bug_list.json")
with open(filename) as f:
return json.load(f)


def _update_bugscache(bug_list):
-max_summary_length = Bugscache._meta.get_field('summary').max_length
-max_whiteboard_length = Bugscache._meta.get_field('whiteboard').max_length
+max_summary_length = Bugscache._meta.get_field("summary").max_length
+max_whiteboard_length = Bugscache._meta.get_field("whiteboard").max_length

for bug in bug_list:
Bugscache.objects.create(
-id=bug['id'],
-status=bug['status'],
-resolution=bug['resolution'],
-summary=bug['summary'][:max_summary_length],
-dupe_of=bug['dupe_of'],
-crash_signature=bug['cf_crash_signature'],
-keywords=",".join(bug['keywords']),
-modified=bug['last_change_time'],
-whiteboard=bug['whiteboard'][:max_whiteboard_length],
+id=bug["id"],
+status=bug["status"],
+resolution=bug["resolution"],
+summary=bug["summary"][:max_summary_length],
+dupe_of=bug["dupe_of"],
+crash_signature=bug["cf_crash_signature"],
+keywords=",".join(bug["keywords"]),
+modified=bug["last_change_time"],
+whiteboard=bug["whiteboard"][:max_whiteboard_length],
processed_update=True,
)

@@ -47,7 +47,7 @@ BUG_SEARCHES = (
[1054456],
),
(
-"[taskcluster:error] Command \" [./test-macosx.sh --no-read-buildbot-config --installer-url=https://q",
+'[taskcluster:error] Command " [./test-macosx.sh --no-read-buildbot-config --installer-url=https://q',
[100],
),
("should not be match_d", []),

@@ -64,33 +64,33 @@ BUG_SEARCHES = (
@pytest.mark.parametrize(("search_term", "exp_bugs"), BUG_SEARCHES)
def test_get_open_recent_bugs(transactional_db, sample_bugs, search_term, exp_bugs):
"""Test that we retrieve the expected open recent bugs for a search term."""
-bug_list = sample_bugs['bugs']
+bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
-bug['resolution'] = ''
-bug['last_change_time'] = fifty_days_ago
+bug["resolution"] = ""
+bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)
suggestions = Bugscache.search(search_term)
-open_recent_bugs = [b['id'] for b in suggestions['open_recent']]
+open_recent_bugs = [b["id"] for b in suggestions["open_recent"]]
assert open_recent_bugs == exp_bugs
-assert suggestions['all_others'] == []
+assert suggestions["all_others"] == []


@pytest.mark.parametrize(("search_term", "exp_bugs"), BUG_SEARCHES)
def test_get_all_other_bugs(transactional_db, sample_bugs, search_term, exp_bugs):
"""Test that we retrieve the expected old bugs for a search term."""
-bug_list = sample_bugs['bugs']
+bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the all_others bucket, and none in open_recent.
for bug in bug_list:
-bug['resolution'] = 'FIXED'
-bug['last_change_time'] = fifty_days_ago
+bug["resolution"] = "FIXED"
+bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)

suggestions = Bugscache.search(search_term)
-assert suggestions['open_recent'] == []
-all_others_bugs = [b['id'] for b in suggestions['all_others']]
+assert suggestions["open_recent"] == []
+all_others_bugs = [b["id"] for b in suggestions["all_others"]]
assert all_others_bugs == exp_bugs

@@ -99,46 +99,46 @@ def test_get_recent_resolved_bugs(transactional_db, sample_bugs):
search_term = "Recently modified resolved bugs should be returned in all_others"
exp_bugs = [100001]

-bug_list = sample_bugs['bugs']
+bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
-bug['resolution'] = 'FIXED'
-bug['last_change_time'] = fifty_days_ago
+bug["resolution"] = "FIXED"
+bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)

suggestions = Bugscache.search(search_term)
-assert suggestions['open_recent'] == []
-all_others_bugs = [b['id'] for b in suggestions['all_others']]
+assert suggestions["open_recent"] == []
+all_others_bugs = [b["id"] for b in suggestions["all_others"]]
assert all_others_bugs == exp_bugs


def test_bug_properties(transactional_db, sample_bugs):
"""Test that we retrieve recent, but fixed bugs for a search term."""
search_term = "test_popup_preventdefault_chrome.xul"
-bug_list = sample_bugs['bugs']
+bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
-bug['resolution'] = ''
-bug['last_change_time'] = fifty_days_ago
+bug["resolution"] = ""
+bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)

expected_keys = set(
[
-'crash_signature',
-'resolution',
-'summary',
-'dupe_of',
-'keywords',
-'id',
-'status',
-'whiteboard',
+"crash_signature",
+"resolution",
+"summary",
+"dupe_of",
+"keywords",
+"id",
+"status",
+"whiteboard",
]
)

suggestions = Bugscache.search(search_term)
-assert set(suggestions['open_recent'][0].keys()) == expected_keys
+assert set(suggestions["open_recent"][0].keys()) == expected_keys


SEARCH_TERMS = (

@@ -152,7 +152,7 @@ SEARCH_TERMS = (
" command timed out: 3600 seconds without output running ",
),
(
-"\"input password unmask.html#abc_def 0 7 7 7\"",
+'"input password unmask.html#abc_def 0 7 7 7"',
" input password unmask.html#abc_def 0 7 7 7 ",
),
)

@@ -199,7 +199,7 @@ def test_import(mock_bugscache_bugzilla_request):

for open_bug, duplicates in EXPECTED_BUG_DUPE_OF_DATA.items():
assert Bugscache.objects.get(id=open_bug).dupe_of is None
-assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list('id', flat=True)) == set(
+assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list("id", flat=True)) == set(
duplicates
)

@@ -9,14 +9,14 @@ from treeherder.model.error_summary import (
 LINE_CLEANING_TEST_CASES = (
     (
         (
-            '00:54:20 INFO - GECKO(1943) | Assertion failure: rc != 0 '
-            '(destroyed timer off its target thread!), at '
-            '/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434'
+            "00:54:20 INFO - GECKO(1943) | Assertion failure: rc != 0 "
+            "(destroyed timer off its target thread!), at "
+            "/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434"
         ),
         (
-            'Assertion failure: rc != 0 (destroyed timer off its target thread!),'
-            ' at '
-            '/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434'
+            "Assertion failure: rc != 0 (destroyed timer off its target thread!),"
+            " at "
+            "/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434"
         ),
     ),
 )

@@ -35,35 +35,35 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
 PIPE_DELIMITED_LINE_TEST_CASES = (
     (
         (
-            '596 INFO TEST-UNEXPECTED-FAIL '
-            '| chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js '
-            '| Check settings tab URL - Got http://mochi.test:8888/browser/browser/components/loop/test/mochitest/loop_fxa.sjs'
+            "596 INFO TEST-UNEXPECTED-FAIL "
+            "| chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js "
+            "| Check settings tab URL - Got http://mochi.test:8888/browser/browser/components/loop/test/mochitest/loop_fxa.sjs"
         ),
         {
-            'path_end': 'chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js',
-            'search_term': ['browser_fxa_login.js'],
+            "path_end": "chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js",
+            "search_term": ["browser_fxa_login.js"],
         },
     ),
     (
         (
-            'REFTEST TEST-UNEXPECTED-FAIL '
-            '| file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html '
-            '| image comparison (==), max difference: 255, number of differing pixels: 251'
+            "REFTEST TEST-UNEXPECTED-FAIL "
+            "| file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html "
+            "| image comparison (==), max difference: 255, number of differing pixels: 251"
         ),
         {
-            'path_end': 'file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html',
-            'search_term': ['component-alpha-exit-1.html'],
+            "path_end": "file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html",
+            "search_term": ["component-alpha-exit-1.html"],
         },
     ),
     (
         (
-            '2423 INFO TEST-UNEXPECTED-FAIL '
-            '| /tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html '
-            '| undefined assertion name - Result logged after SimpleTest.finish()'
+            "2423 INFO TEST-UNEXPECTED-FAIL "
+            "| /tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html "
+            "| undefined assertion name - Result logged after SimpleTest.finish()"
         ),
         {
-            'path_end': '/tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html',
-            'search_term': ['test_dataChannel_basicAudio.html'],
+            "path_end": "/tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html",
+            "search_term": ["test_dataChannel_basicAudio.html"],
         },
     ),
     (

@@ -73,8 +73,8 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
             r"| File 'c:\users\cltbld~1.t-w' was accessed and we were not expecting it: {'Count': 6, 'Duration': 0.112512, 'RunCount': 6}"
         ),
         {
-            'path_end': 'mainthreadio',
-            'search_term': ['mainthreadio'],
+            "path_end": "mainthreadio",
+            "search_term": ["mainthreadio"],
         },
     ),
     (

@@ -85,8 +85,8 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
             "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png"
         ),
         {
-            'path_end': 'http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html',
-            'search_term': ['application crashed [@ jemalloc_crash]'],
+            "path_end": "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html",
+            "search_term": ["application crashed [@ jemalloc_crash]"],
         },
     ),
     (

@@ -97,8 +97,8 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
             "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png"
         ),
         {
-            'path_end': 'http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html',
-            'search_term': ['application crashed [@ jemalloc_crash]'],
+            "path_end": "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html",
+            "search_term": ["application crashed [@ jemalloc_crash]"],
         },
     ),
     (

@@ -108,8 +108,8 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
             "| touch-action attribute test on the cell: assert_true: scroll received while shouldn't expected true got false"
         ),
         {
-            'path_end': '/tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html',
-            'search_term': ['pointerevent_touch-action-table-test_touch-manual.html'],
+            "path_end": "/tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html",
+            "search_term": ["pointerevent_touch-action-table-test_touch-manual.html"],
         },
     ),
 )

@@ -125,15 +125,15 @@ def test_get_delimited_search_term(line, exp_search_info):
 PIPE_DELIMITED_LINE_TEST_CASES_WITH_PARAMS = (
     (
         (
-            'INFO TEST-UNEXPECTED-TIMEOUT '
-            '| /html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last '
-            '| TestRunner hit external timeout (this may indicate a hang)'
+            "INFO TEST-UNEXPECTED-TIMEOUT "
+            "| /html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last "
+            "| TestRunner hit external timeout (this may indicate a hang)"
         ),
         {
-            'path_end': '/html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last',
-            'search_term': [
-                'coep-navigate-popup.https.html?4-last',
-                'coep-navigate-popup.https.html',
+            "path_end": "/html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last",
+            "search_term": [
+                "coep-navigate-popup.https.html?4-last",
+                "coep-navigate-popup.https.html",
             ],
         },
     ),

@@ -150,42 +150,42 @@ def test_get_delimited_search_term_with_params(line, exp_search_info):
 LEAK_LINE_TEST_CASES = (
     (
         (
-            'TEST-UNEXPECTED-FAIL '
-            '| leakcheck | 13195 bytes leaked '
-            '(BackstagePass, CallbackObject, DOMEventTargetHelper, '
-            'EventListenerManager, EventTokenBucket, ...)'
+            "TEST-UNEXPECTED-FAIL "
+            "| leakcheck | 13195 bytes leaked "
+            "(BackstagePass, CallbackObject, DOMEventTargetHelper, "
+            "EventListenerManager, EventTokenBucket, ...)"
         ),
         {
-            'path_end': None,
-            'search_term': [
-                'BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ...'
+            "path_end": None,
+            "search_term": [
+                "BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ..."
             ],
         },
     ),
     (
         (
-            'TEST-UNEXPECTED-FAIL '
-            '| leakcheck | tab process: 44330 bytes leaked '
-            '(AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, '
-            'BufferRecycleBin, CipherSuiteChangeObserver, ...)'
+            "TEST-UNEXPECTED-FAIL "
+            "| leakcheck | tab process: 44330 bytes leaked "
+            "(AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, "
+            "BufferRecycleBin, CipherSuiteChangeObserver, ...)"
         ),
         {
-            'path_end': None,
-            'search_term': [
-                'AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui'
+            "path_end": None,
+            "search_term": [
+                "AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui"
             ],
         },
     ),
     (
         (
-            'TEST-UNEXPECTED-FAIL '
-            '| LeakSanitizer | leak at '
-            'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
+            "TEST-UNEXPECTED-FAIL "
+            "| LeakSanitizer | leak at "
+            "MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init"
         ),
         {
-            'path_end': None,
-            'search_term': [
-                'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
+            "path_end": None,
+            "search_term": [
+                "MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init"
             ],
         },
     ),

@@ -201,21 +201,21 @@ def test_get_leak_search_term(line, exp_search_info):

 FULL_LINE_FALLBACK_TEST_CASES = (
     (
-        'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device',
+        "Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device",
         {
-            'path_end': None,
-            'search_term': [
-                'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device'
+            "path_end": None,
+            "search_term": [
+                "Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device"
             ],
         },
     ),
     (
-        'PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)',
+        "PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)",
         {
-            'path_end': None,
-            'search_term': [
-                'Automation Error: Missing end of test marker (process crashed?)',
-                'Automation Error: Missing end of test marker (process crashed',
+            "path_end": None,
+            "search_term": [
+                "Automation Error: Missing end of test marker (process crashed?)",
+                "Automation Error: Missing end of test marker (process crashed",
             ],
         },
     ),

@@ -232,32 +232,32 @@ def test_get_full_line_search_term(line, exp_search_info):
 LONG_LINE_TEST_CASES = (
     (
         (
-            'command timed out: 2400 seconds without output running '
-            '[\'/tools/buildbot/bin/python\', '
-            '\'scripts/scripts/android_emulator_unittest.py\', \'--cfg\', '
-            '\'android/androidx86.py\', \'--test-suite\', \'robocop-1\', '
-            '\'--test-suite\', \'robocop-2\', \'--test-suite\', \'robocop-3\', '
-            '\'--test-suite\', \'xpcshell\', \'--blob-upload-branch\', '
-            '\'b2g-inbound\', \'--download-symbols\', \'ondemand\'], '
-            'attempting to kill'
+            "command timed out: 2400 seconds without output running "
+            "['/tools/buildbot/bin/python', "
+            "'scripts/scripts/android_emulator_unittest.py', '--cfg', "
+            "'android/androidx86.py', '--test-suite', 'robocop-1', "
+            "'--test-suite', 'robocop-2', '--test-suite', 'robocop-3', "
+            "'--test-suite', 'xpcshell', '--blob-upload-branch', "
+            "'b2g-inbound', '--download-symbols', 'ondemand'], "
+            "attempting to kill"
         ),
         {
-            'path_end': None,
-            'search_term': [
-                'command timed out: 2400 seconds without output running '
-                '[\'/tools/buildbot/bin/python\', \'scripts/scrip'
+            "path_end": None,
+            "search_term": [
+                "command timed out: 2400 seconds without output running "
+                "['/tools/buildbot/bin/python', 'scripts/scrip"
            ],
         },
     ),
     (
         (
-            'TEST-UNEXPECTED-FAIL '
-            '| frames/marionette/test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us '
-            '| AssertionError: 0 != 1'
+            "TEST-UNEXPECTED-FAIL "
+            "| frames/marionette/test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us "
+            "| AssertionError: 0 != 1"
         ),
         {
-            'path_end': 'frames/marionette/test_switch_frame.py',
-            'search_term': ['test_switch_frame.py'],
+            "path_end": "frames/marionette/test_switch_frame.py",
+            "search_term": ["test_switch_frame.py"],
         },
     ),
 )

@@ -275,11 +275,11 @@ def test_get_long_search_term(line, exp_search_info):
 CRASH_LINE_TEST_CASES = (
     (
         (
-            'PROCESS-CRASH | application crashed [@ nsInputStreamPump::OnStateStop()] | '
-            'file:///C:/slave/test/build/tests/jsreftest/tests/'
-            'jsreftest.html?test=test262/ch11/11.4/11.4.1/11.4.1-4.a-6.js'
+            "PROCESS-CRASH | application crashed [@ nsInputStreamPump::OnStateStop()] | "
+            "file:///C:/slave/test/build/tests/jsreftest/tests/"
+            "jsreftest.html?test=test262/ch11/11.4/11.4.1/11.4.1-4.a-6.js"
         ),
-        'nsInputStreamPump::OnStateStop()',
+        "nsInputStreamPump::OnStateStop()",
     ),
 )

@@ -293,30 +293,30 @@ def test_get_crash_signature(line, exp_search_info):

 BLACKLIST_TEST_CASES = (
     (
-        'TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output',
+        "TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output",
         {
-            'path_end': 'remoteautomation.py',
-            'search_term': [
-                'remoteautomation.py | application timed out after 330 seconds with no output'
+            "path_end": "remoteautomation.py",
+            "search_term": [
+                "remoteautomation.py | application timed out after 330 seconds with no output"
             ],
         },
     ),
     (
-        'Return code: 1',
+        "Return code: 1",
         {
-            'path_end': None,
-            'search_term': [None],
+            "path_end": None,
+            "search_term": [None],
         },
     ),
     (
         (
-            'REFTEST PROCESS-CRASH '
-            '| application crashed [@ mozalloc_abort] '
-            '| file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html'
+            "REFTEST PROCESS-CRASH "
+            "| application crashed [@ mozalloc_abort] "
+            "| file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html"
         ),
         {
-            'path_end': 'file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html',
-            'search_term': ['application crashed [@ mozalloc_abort]'],
+            "path_end": "file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html",
+            "search_term": ["application crashed [@ mozalloc_abort]"],
         },
     ),
 )

@@ -4,10 +4,10 @@ from treeherder.model.models import BugzillaComponent, FilesBugzillaMap, Reposit
 from treeherder.etl.files_bugzilla_map import FilesBugzillaMapProcess

 EXPECTED_PROJECTS = [
-    'mozilla-central',
-    'mozilla-beta',
-    'mozilla-release',
-    'mozilla-esr78',
+    "mozilla-central",
+    "mozilla-beta",
+    "mozilla-release",
+    "mozilla-esr78",
 ]


@@ -18,11 +18,11 @@ def test_get_project_to_import(setup_repository_data):
     imported and if the order is correct.
     """
     actual_projects = list(
-        Repository.objects.filter(codebase='gecko')
-        .filter(active_status='active')
+        Repository.objects.filter(codebase="gecko")
+        .filter(active_status="active")
         .filter(life_cycle_order__isnull=False)
-        .values_list('name', flat=True)
-        .order_by('life_cycle_order')
+        .values_list("name", flat=True)
+        .order_by("life_cycle_order")
     )
     assert actual_projects == EXPECTED_PROJECTS

@@ -41,61 +41,61 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
     assert FilesBugzillaMap.objects.count() == 7

     EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 = [
-        ('AUTHORS', 'AUTHORS', 'mozilla.org', 'Licensing'),
-        ('browser/components/BrowserGlue.jsm', 'BrowserGlue.jsm', 'Firefox', 'General'),
+        ("AUTHORS", "AUTHORS", "mozilla.org", "Licensing"),
+        ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
         (
-            'mozilla-esr78-folder/file.new.here',
-            'file.new.here',
-            'Mock Component',
-            'File only present in mozilla-esr78',
+            "mozilla-esr78-folder/file.new.here",
+            "file.new.here",
+            "Mock Component",
+            "File only present in mozilla-esr78",
         ),
         (
-            'otherfolder/AUTHORS',
-            'AUTHORS',
-            'mozilla.org',
-            'Different path, same product, different component',
+            "otherfolder/AUTHORS",
+            "AUTHORS",
+            "mozilla.org",
+            "Different path, same product, different component",
         ),
         (
-            'testing/web-platform/meta/IndexedDB/historical.html.ini',
-            'historical.html.ini',
-            'Testing',
-            'web-platform-tests',
+            "testing/web-platform/meta/IndexedDB/historical.html.ini",
+            "historical.html.ini",
+            "Testing",
+            "web-platform-tests",
         ),
         (
-            'testing/web-platform/tests/IndexedDB/historical.html',
-            'historical.html',
-            'Core',
-            'Storage: IndexedDB',
+            "testing/web-platform/tests/IndexedDB/historical.html",
+            "historical.html",
+            "Core",
+            "Storage: IndexedDB",
         ),
         (
-            'toolkit/mozilla-beta/fantasy_file.js',
-            'fantasy_file.js',
-            'Mock',
-            'File first seen on mozilla-beta',
+            "toolkit/mozilla-beta/fantasy_file.js",
+            "fantasy_file.js",
+            "Mock",
+            "File first seen on mozilla-beta",
         ),
     ]
     assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 == list(
         FilesBugzillaMap.objects.all()
         .values_list(
-            'path', 'file_name', 'bugzilla_component__product', 'bugzilla_component__component'
+            "path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
         )
-        .order_by('path')
+        .order_by("path")
     )

     EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 = [
-        ('Core', 'Storage: IndexedDB'),
-        ('Firefox', 'General'),
-        ('Mock', 'File first seen on mozilla-beta'),
-        ('Mock Component', 'File only present in mozilla-esr78'),
-        ('Testing', 'web-platform-tests'),
-        ('mozilla.org', 'Different path, same product, different component'),
-        ('mozilla.org', 'Licensing'),
+        ("Core", "Storage: IndexedDB"),
+        ("Firefox", "General"),
+        ("Mock", "File first seen on mozilla-beta"),
+        ("Mock Component", "File only present in mozilla-esr78"),
+        ("Testing", "web-platform-tests"),
+        ("mozilla.org", "Different path, same product, different component"),
+        ("mozilla.org", "Licensing"),
     ]
     assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 == sorted(
         list(
             BugzillaComponent.objects.all()
-            .values_list('product', 'component')
-            .order_by('product', 'component')
+            .values_list("product", "component")
+            .order_by("product", "component")
         )
     )

@@ -104,55 +104,55 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
     assert FilesBugzillaMap.objects.count() == 6

     EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 = [
-        ('AUTHORS', 'AUTHORS', 'mozilla.org', 'Import 2: same product, different component'),
-        ('browser/components/BrowserGlue.jsm', 'BrowserGlue.jsm', 'Firefox', 'General'),
+        ("AUTHORS", "AUTHORS", "mozilla.org", "Import 2: same product, different component"),
+        ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
         (
-            'testing/web-platform/meta/IndexedDB/historical.html.ini',
-            'historical.html.ini',
-            'Testing',
-            'web-platform-tests',
+            "testing/web-platform/meta/IndexedDB/historical.html.ini",
+            "historical.html.ini",
+            "Testing",
+            "web-platform-tests",
         ),
         (
-            'testing/web-platform/tests/IndexedDB/historical.html',
-            'historical.html',
-            'Core',
-            'Storage: IndexedDB',
+            "testing/web-platform/tests/IndexedDB/historical.html",
+            "historical.html",
+            "Core",
+            "Storage: IndexedDB",
        ),
         (
-            'testing/web-platform/tests/IndexedDB2/historical.html',
-            'historical.html',
-            'Core',
-            'Storage: IndexedDB2',
+            "testing/web-platform/tests/IndexedDB2/historical.html",
+            "historical.html",
+            "Core",
+            "Storage: IndexedDB2",
         ),
         (
-            'toolkit/mozilla-beta/fantasy_file.js',
-            'fantasy_file.js',
-            'Mock (import 2)',
-            'File first seen on mozilla-beta',
+            "toolkit/mozilla-beta/fantasy_file.js",
+            "fantasy_file.js",
+            "Mock (import 2)",
+            "File first seen on mozilla-beta",
         ),
     ]
     assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 == sorted(
         list(
             FilesBugzillaMap.objects.all()
             .values_list(
-                'path', 'file_name', 'bugzilla_component__product', 'bugzilla_component__component'
+                "path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
            )
-            .order_by('path')
+            .order_by("path")
         )
     )

     EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 = [
-        ('Core', 'Storage: IndexedDB'),
-        ('Core', 'Storage: IndexedDB2'),
-        ('Firefox', 'General'),
-        ('Mock (import 2)', 'File first seen on mozilla-beta'),
-        ('Testing', 'web-platform-tests'),
-        ('mozilla.org', 'Import 2: same product, different component'),
+        ("Core", "Storage: IndexedDB"),
+        ("Core", "Storage: IndexedDB2"),
+        ("Firefox", "General"),
+        ("Mock (import 2)", "File first seen on mozilla-beta"),
+        ("Testing", "web-platform-tests"),
+        ("mozilla.org", "Import 2: same product, different component"),
     ]
     assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 == sorted(
         list(
             BugzillaComponent.objects.all()
-            .values_list('product', 'component')
-            .order_by('product', 'component')
+            .values_list("product", "component")
+            .order_by("product", "component")
         )
     )

@@ -3,4 +3,4 @@ from treeherder.model.models import OptionCollection

 def test_option_collection_map(sample_option_collections):
     option_map = OptionCollection.objects.get_option_collection_map()
-    assert option_map == {'option_hash1': 'opt1', 'option_hash2': 'opt2'}
+    assert option_map == {"option_hash1": "opt1", "option_hash2": "opt2"}

@@ -6,7 +6,7 @@ def test_performance_signatures_with_different_applications(test_perf_signature)

     # create a performance signature that only differs from another existing one by the application name
     test_perf_signature.id = None
-    test_perf_signature.application = 'chrome'
+    test_perf_signature.application = "chrome"
     test_perf_signature.save()

     assert PerformanceSignature.objects.count() == 2

@@ -6,7 +6,7 @@ from django.db.utils import IntegrityError


 def test_performance_tags_cannot_have_duplicate_names(transactional_db):
-    PerformanceTag.objects.create(name='harness')
+    PerformanceTag.objects.create(name="harness")

     with pytest.raises(IntegrityError):
-        PerformanceTag.objects.create(name='harness')
+        PerformanceTag.objects.create(name="harness")

@@ -1,10 +1,10 @@
 import pytest
 from django.db.utils import IntegrityError

-SAME_SUITE_PUBLIC_NAME = 'same suite name'
-SAME_TEST_PUBLIC_NAME = 'same test name'
-SAME_SUITE = 'same suite'
-SAME_TEST = 'same test'
+SAME_SUITE_PUBLIC_NAME = "same suite name"
+SAME_TEST_PUBLIC_NAME = "same test name"
+SAME_SUITE = "same suite"
+SAME_TEST = "same test"


 @pytest.mark.parametrize(

@@ -19,16 +19,16 @@ SAME_TEST = 'same test'
             SAME_TEST_PUBLIC_NAME,
             SAME_SUITE,
             SAME_SUITE,
-            'test',
-            'test_2',
+            "test",
+            "test_2",
         ),
         (
             SAME_SUITE_PUBLIC_NAME,
             SAME_SUITE_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
-            'suite',
-            'suite_2',
+            "suite",
+            "suite_2",
             SAME_TEST,
             SAME_TEST,
         ),

@@ -37,10 +37,10 @@ SAME_TEST = 'same test'
             SAME_SUITE_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
-            'suite',
-            'suite_2',
-            'test',
-            'test_2',
+            "suite",
+            "suite_2",
+            "test",
+            "test_2",
         ),
     ],
 )

@@ -77,42 +77,42 @@ def test_trigger_public_suite_name_constraint(
     "test_public_name, test_public_name_2,"
     "suite, suite_2, test, test_2",
     [
-        (None, None, None, None, 'suite', 'suite_2', 'test', 'test_2'),
+        (None, None, None, None, "suite", "suite_2", "test", "test_2"),
         (
-            'suite_public_name',
-            'suite_public_name_2',
+            "suite_public_name",
+            "suite_public_name_2",
            None,
            None,
-            'suite',
-            'suite_2',
-            'test',
-            'test_2',
+            "suite",
+            "suite_2",
+            "test",
+            "test_2",
         ),
-        (None, None, 'test', 'test_2', 'suite', 'suite_2', 'test', 'test_2'),
-        ('suite_public_name', None, 'test', None, 'suite', 'suite_2', 'test', 'test_2'),
+        (None, None, "test", "test_2", "suite", "suite_2", "test", "test_2"),
+        ("suite_public_name", None, "test", None, "suite", "suite_2", "test", "test_2"),
         (
-            'suite_public_name',
-            'suite_public_name_2',
+            "suite_public_name",
+            "suite_public_name_2",
             SAME_TEST_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
-            'suite',
-            'suite_2',
-            'test',
-            'test_2',
+            "suite",
+            "suite_2",
+            "test",
+            "test_2",
         ),
         (
             SAME_SUITE_PUBLIC_NAME,
             SAME_SUITE_PUBLIC_NAME,
-            'test_public_name',
-            'test_public_name_2',
-            'suite',
-            'suite_2',
-            'test',
-            'test_2',
+            "test_public_name",
+            "test_public_name_2",
+            "suite",
+            "suite_2",
+            "test",
+            "test_2",
         ),
         (
-            'suite_public_name',
-            'suite_public_name_2',
+            "suite_public_name",
+            "suite_public_name_2",
             SAME_TEST_PUBLIC_NAME,
             SAME_TEST_PUBLIC_NAME,
             SAME_SUITE,

@@ -121,14 +121,14 @@ def test_trigger_public_suite_name_constraint(
             SAME_TEST,
         ),
         (
-            'suite_public_name',
-            'suite_public_name_2',
-            'test_public_name',
-            'test_public_name_2',
-            'suite',
-            'suite_2',
-            'test',
-            'test_2',
+            "suite_public_name",
+            "suite_public_name_2",
+            "test_public_name",
+            "test_public_name_2",
+            "suite",
+            "suite_2",
+            "test",
+            "test_2",
         ),
     ],
 )

@@ -4,7 +4,7 @@ MON, TUE, WED, THU, FRI, SAT, SUN = range(1, 8)


 def test_triage_due_alert_summary_created_monday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -16,7 +16,7 @@ def test_triage_due_alert_summary_created_monday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_tuesday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-31')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-31")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -28,7 +28,7 @@ def test_triage_due_alert_summary_created_tuesday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_wednesday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-01')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-01")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -40,7 +40,7 @@ def test_triage_due_alert_summary_created_wednesday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_thursday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-02')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-02")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -53,7 +53,7 @@ def test_triage_due_alert_summary_created_thursday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_friday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -66,7 +66,7 @@ def test_triage_due_alert_summary_created_friday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_saturday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-04')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-04")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -78,7 +78,7 @@ def test_triage_due_alert_summary_created_saturday(test_perf_alert_summary):


 def test_triage_due_alert_summary_created_sunday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-05')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-05")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -90,7 +90,7 @@ def test_triage_due_alert_summary_created_sunday(test_perf_alert_summary):


 def test_alert_summary_with_modified_created_date(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
     test_perf_alert_summary.triage_due_date = None

     assert not test_perf_alert_summary.triage_due_date

@@ -100,7 +100,7 @@ def test_alert_summary_with_modified_created_date(test_perf_alert_summary):
     # created monday isoweekday = 1 + OKR = 3 => 4
     assert test_perf_alert_summary.triage_due_date.isoweekday() == THU

-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")

     test_perf_alert_summary.update_status()

@@ -110,7 +110,7 @@ def test_alert_summary_with_modified_created_date(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_monday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -122,7 +122,7 @@ def test_bug_due_alert_summary_created_monday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_tuesday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-31')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-31")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -135,7 +135,7 @@ def test_bug_due_alert_summary_created_tuesday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_wednesday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-01')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-01")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -148,7 +148,7 @@ def test_bug_due_alert_summary_created_wednesday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_thursday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-02')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-02")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -161,7 +161,7 @@ def test_bug_due_alert_summary_created_thursday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_friday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -174,7 +174,7 @@ def test_bug_due_alert_summary_created_friday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_saturday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-04')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-04")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -186,7 +186,7 @@ def test_bug_due_alert_summary_created_saturday(test_perf_alert_summary):


 def test_bug_due_alert_summary_created_sunday(test_perf_alert_summary):
-    test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-05')
+    test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-05")
     test_perf_alert_summary.bug_due_date = None

     assert not test_perf_alert_summary.bug_due_date

@@ -22,42 +22,42 @@ from treeherder.perf.models import (
 from treeherder.services.taskcluster import notify_client_factory
 from treeherder.utils import default_serializer

-load_json_fixture = SampleDataJSONLoader('sherlock')
+load_json_fixture = SampleDataJSONLoader("sherlock")


 @pytest.fixture(scope="module")
 def record_context_sample():
     # contains 5 data points that can be backfilled
-    return load_json_fixture('recordContext.json')
+    return load_json_fixture("recordContext.json")


-@pytest.fixture(params=['totally_broken_json', 'missing_job_fields', 'null_job_fields'])
+@pytest.fixture(params=["totally_broken_json", "missing_job_fields", "null_job_fields"])
 def broken_context_str(record_context_sample: dict, request) -> list:
     context_str = json.dumps(record_context_sample)
     specific = request.param

-    if specific == 'totally_broken_json':
-        return copy(context_str).replace(r'"', '<')
+    if specific == "totally_broken_json":
+        return copy(context_str).replace(r'"', "<")

     else:
         record_copy = deepcopy(record_context_sample)
-        if specific == 'missing_job_fields':
+        if specific == "missing_job_fields":
             for data_point in record_copy:
-                del data_point['job_id']
+                del data_point["job_id"]

-        elif specific == 'null_job_fields':
+        elif specific == "null_job_fields":
             for data_point in record_copy:
-                data_point['job_id'] = None
+                data_point["job_id"] = None
         return json.dumps(record_copy)


-@pytest.fixture(params=['preliminary', 'from_non_linux'])
+@pytest.fixture(params=["preliminary", "from_non_linux"])
 def record_unsuited_for_backfill(test_perf_alert, request):
     report = BackfillReport.objects.create(summary=test_perf_alert.summary)

-    if request.param == 'preliminary':
+    if request.param == "preliminary":
         return BackfillRecord.objects.create(alert=test_perf_alert, report=report)
-    elif request.param == 'from_non_linux':
+    elif request.param == "from_non_linux":
         # test_perf_alert originates from wind platform, by default
         return BackfillRecord.objects.create(
             alert=test_perf_alert, report=report, status=BackfillRecord.READY_FOR_PROCESSING

@@ -69,9 +69,9 @@ def record_with_job_symbol(test_perf_alert):
     report = BackfillReport.objects.create(summary=test_perf_alert.summary)

     job_group = JobGroup.objects.create(
-        symbol='Btime', name='Browsertime performance tests on Firefox'
+        symbol="Btime", name="Browsertime performance tests on Firefox"
     )
-    job_type = JobType.objects.create(symbol='Bogo', name='Bogo tests')
+    job_type = JobType.objects.create(symbol="Bogo", name="Bogo tests")
     return BackfillRecord.objects.create(
         alert=test_perf_alert,
         report=report,

@@ -81,15 +81,15 @@ def record_with_job_symbol(test_perf_alert):
     )


-@pytest.fixture(params=['no_job_tier', 'no_job_group', 'no_job_type'])
+@pytest.fixture(params=["no_job_tier", "no_job_group", "no_job_type"])
 def record_with_missing_job_symbol_components(record_with_job_symbol, request):
-    if request.param == 'no_job_tier':
+    if request.param == "no_job_tier":
         record_with_job_symbol.job_tier = None
         record_with_job_symbol.save()
-    elif request.param == 'no_job_group':
+    elif request.param == "no_job_group":
         record_with_job_symbol.job_group = None
         record_with_job_symbol.save()
-    elif request.param == 'no_job_type':
+    elif request.param == "no_job_type":
         record_with_job_symbol.job_type = None
         record_with_job_symbol.save()

@@ -97,22 +97,22 @@ def record_with_missing_job_symbol_components(record_with_job_symbol, request):


 def prepare_record_with_search_str(record_with_job_symbol, search_str_with):
-    if search_str_with == 'no_job_group':
+    if search_str_with == "no_job_group":
         record_with_job_symbol.job_group = None
         record_with_job_symbol.save()
-    elif search_str_with == 'no_job_type':
+    elif search_str_with == "no_job_type":
         record_with_job_symbol.job_type = None
         record_with_job_symbol.save()

     return record_with_job_symbol


-@pytest.fixture(params=['windows', 'linux', 'osx'])
+@pytest.fixture(params=["windows", "linux", "osx"])
 def platform_specific_signature(
     test_repository, test_perf_framework, request
 ) -> PerformanceSignature:
     new_platform = MachinePlatform.objects.create(
-        os_name=request.param, platform=request.param, architecture='x86'
+        os_name=request.param, platform=request.param, architecture="x86"
     )
     return create_perf_signature(test_perf_framework, test_repository, new_platform)

@@ -153,7 +153,7 @@ def record_from_mature_report(test_perf_alert_2):

 @pytest.fixture
 def report_maintainer_mock():
-    return type('', (), {'provide_updated_reports': lambda *params: []})
+    return type("", (), {"provide_updated_reports": lambda *params: []})


 @pytest.fixture

@@ -161,9 +161,9 @@ def backfill_tool_mock():
     def backfill_job(job_id):
         if job_id is None:
             raise Job.DoesNotExist
-        return 'RANDOM_TASK_ID'
+        return "RANDOM_TASK_ID"

-    return type('', (), {'backfill_job': backfill_job})
+    return type("", (), {"backfill_job": backfill_job})


 @pytest.fixture

@@ -174,17 +174,17 @@ def secretary():
 @pytest.fixture
 def sherlock_settings(secretary, db):
     secretary.validate_settings()
-    return PerformanceSettings.objects.get(name='perf_sheriff_bot')
+    return PerformanceSettings.objects.get(name="perf_sheriff_bot")


 @pytest.fixture
 def empty_sheriff_settings(secretary):
     all_of_them = 1_000_000_000
     secretary.validate_settings()
-    secretary.consume_backfills(on_platform='linux', amount=all_of_them)
-    secretary.consume_backfills(on_platform='windows', amount=all_of_them)
-    secretary.consume_backfills(on_platform='osx', amount=all_of_them)
-    return PerformanceSettings.objects.get(name='perf_sheriff_bot')
+    secretary.consume_backfills(on_platform="linux", amount=all_of_them)
+    secretary.consume_backfills(on_platform="windows", amount=all_of_them)
+    secretary.consume_backfills(on_platform="osx", amount=all_of_them)
+    return PerformanceSettings.objects.get(name="perf_sheriff_bot")


 # For testing Secretary

@@ -224,7 +224,7 @@ def create_record():
 @pytest.fixture
 def notify_client_mock() -> taskcluster.Notify:
     return MagicMock(
-        spec=notify_client_factory('https://fakerooturl.org', 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
+        spec=notify_client_factory("https://fakerooturl.org", "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")
     )


@@ -239,13 +239,13 @@ def tc_notify_mock(monkeypatch):

     mock = MagicMock()
     response = Response()
-    mock.email.return_value = {'response': response}
+    mock.email.return_value = {"response": response}

     def mockreturn(*arg, **kwargs):
         nonlocal mock
         return mock

-    monkeypatch.setattr(tc_services, 'notify_client_factory', mockreturn)
+    monkeypatch.setattr(tc_services, "notify_client_factory", mockreturn)
     return mock

@@ -16,13 +16,13 @@ LETTERS = string.ascii_lowercase
 RANDOM_STRINGS = set()


-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def alerts_picker():
     # real-world instance
     return AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

@@ -34,19 +34,19 @@ def mock_backfill_context_fetcher(backfill_record_context):

 @pytest.fixture
 def option_collection():
-    option = Option.objects.create(name='opt')
-    return OptionCollection.objects.create(option_collection_hash='my_option_hash', option=option)
+    option = Option.objects.create(name="opt")
+    return OptionCollection.objects.create(option_collection_hash="my_option_hash", option=option)


 @pytest.fixture
 def relevant_platform():
-    return MachinePlatform.objects.create(os_name='win', platform='windows10', architecture='x86')
+    return MachinePlatform.objects.create(os_name="win", platform="windows10", architecture="x86")


 @pytest.fixture
 def irrelevant_platform():
     return MachinePlatform.objects.create(
-        os_name='OS_OF_NO_INTEREST', platform='PLATFORM_OF_NO_INTEREST', architecture='x86'
+        os_name="OS_OF_NO_INTEREST", platform="PLATFORM_OF_NO_INTEREST", architecture="x86"
     )

@@ -56,7 +56,7 @@ def unique_random_string():

     def _unique_random_string(length=14):
         while True:
-            random_string = ''.join(random.choice(LETTERS) for _ in range(length))
+            random_string = "".join(random.choice(LETTERS) for _ in range(length))
             if random_string not in RANDOM_STRINGS:
                 RANDOM_STRINGS.add(random_string)
                 return random_string

@@ -111,16 +111,16 @@ def create_alerts(create_perf_signature):
 def test_many_various_alerts():
     alerts = [Mock(spec=PerformanceAlert) for _ in range(10)]
     platforms = (
-        'windows10-64-shippable',
-        'windows10-64-shippable',
-        'windows7-32-shippable',
-        'windows7-32-shippable',
-        'linux64-shippable-qr',
-        'linux64-shippable-qr',
-        'osx-10-10-shippable',
-        'osx-10-10-shippable',
-        'android-hw-pix-7-1-android-aarch64',
-        'android-hw-pix-7-1-android-aarch64',
+        "windows10-64-shippable",
+        "windows10-64-shippable",
+        "windows7-32-shippable",
+        "windows7-32-shippable",
+        "linux64-shippable-qr",
+        "linux64-shippable-qr",
+        "osx-10-10-shippable",
+        "osx-10-10-shippable",
+        "android-hw-pix-7-1-android-aarch64",
+        "android-hw-pix-7-1-android-aarch64",
     )

     reversed_magnitudes = list(reversed(range(len(alerts))))

@@ -137,7 +137,7 @@ def test_many_various_alerts():
 @pytest.fixture
 def test_few_various_alerts():
     alerts = [Mock(spec=PerformanceAlert) for _ in range(2)]
-    platforms = ('windows7-32-shippable', 'linux64-shippable-qr')
+    platforms = ("windows7-32-shippable", "linux64-shippable-qr")
     reversed_magnitudes = list(reversed(range(len(alerts))))
     toggle = True
     for idx, alert in enumerate(alerts):

@@ -151,7 +151,7 @@ def test_few_various_alerts():
 @pytest.fixture
 def test_macosx_alert():
     alert = Mock(spec=PerformanceAlert)
-    platform = 'macosx1015-64-shippable-qr'
+    platform = "macosx1015-64-shippable-qr"
     alert.series_signature.platform.platform = platform
     alert.is_regression = True
     return alert

@@ -161,11 +161,11 @@ def test_macosx_alert():
 def test_few_regressions():
     alerts = [Mock(spec=PerformanceAlert) for _ in range(5)]
     platforms = (
-        'windows10-64-shippable',
-        'windows7-32-shippable',
-        'linux64-shippable-qr',
-        'osx-10-10-shippable',
-        'android-hw-pix-7-1-android-aarch64',
+        "windows10-64-shippable",
+        "windows7-32-shippable",
+        "linux64-shippable-qr",
+        "osx-10-10-shippable",
+        "android-hw-pix-7-1-android-aarch64",
     )
     reversed_magnitudes = list(reversed(range(len(alerts))))
     for idx, alert in enumerate(alerts):

@@ -187,10 +187,10 @@ def test_few_improvements(test_few_regressions):
 def test_bad_platform_names():
     alerts = [Mock(spec=PerformanceAlert) for _ in range(4)]
     platforms = (
-        'rfvrtgb',  # noqa
-        '4.0',
-        '54dcwec58',  # noqa
-        '8y6 t g',
+        "rfvrtgb",  # noqa
+        "4.0",
+        "54dcwec58",  # noqa
+        "8y6 t g",
     )
     for idx, alert in enumerate(alerts):
         alert.series_signature.platform.platform = platforms[idx]

@@ -204,7 +204,7 @@ ONE_DAY_INTERVAL = datetime.timedelta(days=1)

 def prepare_graph_data_scenario(push_ids_to_keep, highlighted_push_id, perf_alert, perf_signature):
     original_job_count = Job.objects.count()
-    selectable_jobs = Job.objects.filter(push_id__in=push_ids_to_keep).order_by('push_id', 'id')
+    selectable_jobs = Job.objects.filter(push_id__in=push_ids_to_keep).order_by("push_id", "id")
     Job.objects.exclude(push_id__in=push_ids_to_keep).delete()

     assert Job.objects.count() < original_job_count

@@ -11,14 +11,14 @@ def test_init():
     AlertsPicker(
         max_alerts=0,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     with pytest.raises(ValueError):
         AlertsPicker(
             max_alerts=3,
             max_improvements=0,
-            platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+            platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
         )

     with pytest.raises(ValueError):

@@ -37,15 +37,15 @@ def test_extract_important_alerts(
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     expected_platforms_order = (
-        'windows10-64-shippable',
-        'windows7-32-shippable',
-        'linux64-shippable-qr',
-        'osx-10-10-shippable',
-        'windows10-64-shippable',
+        "windows10-64-shippable",
+        "windows7-32-shippable",
+        "linux64-shippable-qr",
+        "osx-10-10-shippable",
+        "windows10-64-shippable",
     )
     expected_magnitudes_order = (4, 3, 2, 1, 4)

@@ -73,7 +73,7 @@ def test_ensure_alerts_variety(
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     selected_alerts = picker._ensure_alerts_variety(test_few_regressions)

@@ -101,7 +101,7 @@ def test_ensure_alerts_variety(
     picker = AlertsPicker(
         max_alerts=1,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     selected_alerts = picker._ensure_alerts_variety(test_few_various_alerts)

@@ -112,17 +112,17 @@ def test_ensure_alerts_variety(


 @pytest.mark.parametrize(
-    ('max_alerts, expected_alerts_platforms'),  # noqa
+    ("max_alerts, expected_alerts_platforms"),  # noqa
     [
-        (5, ('windows10', 'windows7', 'linux', 'osx', 'android')),
-        (8, ('windows10', 'windows7', 'linux', 'osx', 'android', 'windows10', 'windows7', 'linux')),
+        (5, ("windows10", "windows7", "linux", "osx", "android")),
+        (8, ("windows10", "windows7", "linux", "osx", "android", "windows10", "windows7", "linux")),
     ],
 )
 def test_ensure_platform_variety(test_many_various_alerts, max_alerts, expected_alerts_platforms):
     picker = AlertsPicker(
         max_alerts=max_alerts,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     picked_alerts = picker._ensure_platform_variety(test_many_various_alerts)

@@ -134,17 +134,17 @@ def test_os_relevance():
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )
-    assert 5 == picker._os_relevance('windows10')
-    assert 4 == picker._os_relevance('windows7')
-    assert 3 == picker._os_relevance('linux')
-    assert 2 == picker._os_relevance('osx')
-    assert 2 == picker._os_relevance('macosx')  # ensure macosx has the same relevance as osx
-    assert 1 == picker._os_relevance('android')
+    assert 5 == picker._os_relevance("windows10")
+    assert 4 == picker._os_relevance("windows7")
+    assert 3 == picker._os_relevance("linux")
+    assert 2 == picker._os_relevance("osx")
+    assert 2 == picker._os_relevance("macosx")  # ensure macosx has the same relevance as osx
+    assert 1 == picker._os_relevance("android")

     with pytest.raises(ValueError):
-        picker._os_relevance('some weird OS')
+        picker._os_relevance("some weird OS")


 def test_has_relevant_platform(

@@ -153,7 +153,7 @@ def test_has_relevant_platform(
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     for alert in test_many_various_alerts:

@@ -167,7 +167,7 @@ def test_extract_by_relevant_platforms(test_many_various_alerts, test_bad_platfo
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )
     all_alerts = test_many_various_alerts + test_bad_platform_names

@@ -183,20 +183,20 @@ def test_multi_criterion_sort(test_many_various_alerts):
     picker = AlertsPicker(
         max_alerts=5,
         max_improvements=2,
-        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+        platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
     )

     expected_platforms_order = (
-        'android-hw-pix-7-1-android-aarch64',
-        'windows10-64-shippable',
-        'windows7-32-shippable',
-        'linux64-shippable-qr',
-        'osx-10-10-shippable',
-        'osx-10-10-shippable',
-        'android-hw-pix-7-1-android-aarch64',
-        'windows10-64-shippable',
-        'windows7-32-shippable',
-        'linux64-shippable-qr',
+        "android-hw-pix-7-1-android-aarch64",
+        "windows10-64-shippable",
+        "windows7-32-shippable",
+        "linux64-shippable-qr",
+        "osx-10-10-shippable",
+        "osx-10-10-shippable",
+        "android-hw-pix-7-1-android-aarch64",
+        "windows10-64-shippable",
+        "windows7-32-shippable",
+        "linux64-shippable-qr",
     )
     expected_magnitudes_order = (1, 9, 7, 5, 3, 2, 0, 8, 6, 4)

@@ -24,16 +24,16 @@ def test_identify_retriggerables_as_unit():

     # its small private methods
     annotated_data_points = [
-        {'job_id': 1, 'push_id': 1},
-        {'job_id': 2, 'push_id': 2},
-        {'job_id': 3, 'push_id': 2},
-        {'job_id': 4, 'push_id': 3},
-        {'job_id': 5, 'push_id': 3},
-        {'job_id': 6, 'push_id': 3},
+        {"job_id": 1, "push_id": 1},
+        {"job_id": 2, "push_id": 2},
+        {"job_id": 3, "push_id": 2},
+        {"job_id": 4, "push_id": 3},
+        {"job_id": 5, "push_id": 3},
+        {"job_id": 6, "push_id": 3},
     ]
     operation = IdentifyAlertRetriggerables(max_data_points=5, time_interval=one_day)
     flattened_data_points = operation._one_data_point_per_push(annotated_data_points)  # noqa
-    push_counter = Counter([data_point['push_id'] for data_point in flattened_data_points])
+    push_counter = Counter([data_point["push_id"] for data_point in flattened_data_points])

     assert max(count for count in push_counter.values()) == 1
@@ -6,9 +6,9 @@ from treeherder.services.taskcluster import TaskclusterModelNullObject


 class TestBackfillTool:
-    FAKE_ROOT_URL = 'https://fakerooturl.org'
-    FAKE_OPTIONS = (FAKE_ROOT_URL, 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
-    MISSING_JOB_ID = '12830123912'
+    FAKE_ROOT_URL = "https://fakerooturl.org"
+    FAKE_OPTIONS = (FAKE_ROOT_URL, "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")
+    MISSING_JOB_ID = "12830123912"

     def test_backfilling_missing_job_errors_out(self, db):
         backfill_tool = BackfillTool(TaskclusterModelNullObject(*self.FAKE_OPTIONS))
@@ -27,12 +27,12 @@ def test_email_is_sent_after_successful_backfills(
     )
     sherlock.sheriff(
         since=EPOCH,
-        frameworks=['test_talos'],
+        frameworks=["test_talos"],
         repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
     )
     record_ready_for_processing.refresh_from_db()
     assert BackfillNotificationRecord.objects.count() == 1
-    call_command('report_backfill_outcome')
+    call_command("report_backfill_outcome")
     assert BackfillNotificationRecord.objects.count() == 0
@@ -56,12 +56,12 @@ def test_email_is_still_sent_if_context_is_too_corrupt_to_be_actionable(
     )
     sherlock.sheriff(
         since=EPOCH,
-        frameworks=['test_talos'],
+        frameworks=["test_talos"],
         repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
     )

     assert BackfillNotificationRecord.objects.count() == 1
-    call_command('report_backfill_outcome')
+    call_command("report_backfill_outcome")
     assert BackfillNotificationRecord.objects.count() == 0
@@ -77,21 +77,21 @@ def test_no_email_is_sent_if_runtime_exceeded(

     sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
     try:
-        sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+        sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
     except MaxRuntimeExceeded:
         pass

     assert BackfillNotificationRecord.objects.count() == 0
-    call_command('report_backfill_outcome')
+    call_command("report_backfill_outcome")
     assert BackfillNotificationRecord.objects.count() == 0


 @pytest.mark.parametrize(
-    'framework, repository',
+    "framework, repository",
     [
-        ('non_existent_framework', test_settings.TREEHERDER_TEST_REPOSITORY_NAME),
-        ('test_talos', 'non_existent_repository'),
-        ('non_existent_framework', 'non_existent_repository'),
+        ("non_existent_framework", test_settings.TREEHERDER_TEST_REPOSITORY_NAME),
+        ("test_talos", "non_existent_repository"),
+        ("non_existent_framework", "non_existent_repository"),
     ],
 )
 def test_no_email_is_sent_for_untargeted_alerts(
@@ -117,5 +117,5 @@ def test_no_email_is_sent_for_untargeted_alerts(
     record_ready_for_processing.refresh_from_db()

     assert BackfillNotificationRecord.objects.count() == 0
-    call_command('report_backfill_outcome')
+    call_command("report_backfill_outcome")
     assert BackfillNotificationRecord.objects.count() == 0
@@ -42,15 +42,15 @@ def record_backfilled(test_perf_alert, record_context_sample):

 @pytest.fixture
 def range_dates(record_context_sample):
-    from_date = datetime.fromisoformat(record_context_sample[0]['push_timestamp'])
-    to_date = datetime.fromisoformat(record_context_sample[-1]['push_timestamp'])
+    from_date = datetime.fromisoformat(record_context_sample[0]["push_timestamp"])
+    to_date = datetime.fromisoformat(record_context_sample[-1]["push_timestamp"])

     return {
-        'before_date': from_date - timedelta(days=5),
-        'from_date': from_date,
-        'in_range_date': from_date + timedelta(hours=13),
-        'to_date': to_date,
-        'after_date': to_date + timedelta(days=3),
+        "before_date": from_date - timedelta(days=5),
+        "from_date": from_date,
+        "in_range_date": from_date + timedelta(hours=13),
+        "to_date": to_date,
+        "after_date": to_date + timedelta(days=3),
     }
@@ -58,28 +58,28 @@ def range_dates(record_context_sample):
 def outcome_checking_pushes(
     create_push, range_dates, record_context_sample, test_repository, test_repository_2
 ):
-    from_push_id = record_context_sample[0]['push_id']
-    to_push_id = record_context_sample[-1]['push_id']
+    from_push_id = record_context_sample[0]["push_id"]
+    to_push_id = record_context_sample[-1]["push_id"]

     pushes = [
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['before_date']),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["before_date"]),
         create_push(
             test_repository,
             revision=uuid.uuid4(),
-            time=range_dates['from_date'],
+            time=range_dates["from_date"],
             explicit_id=from_push_id,
         ),
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
         create_push(
             test_repository,
             revision=uuid.uuid4(),
-            time=range_dates['to_date'],
+            time=range_dates["to_date"],
             explicit_id=to_push_id,
         ),
-        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['after_date']),
+        create_push(test_repository, revision=uuid.uuid4(), time=range_dates["after_date"]),
     ]

     return pushes
@@ -92,7 +92,7 @@ def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
     pairs = zip(outcome_checking_pushes, jobs)
     for push, job in pairs:
         job.push = push
-        job.result = 'success'
+        job.result = "success"
         job.job_type_id = JOB_TYPE_ID
         job.save()
         _successful_jobs.append(job)
@@ -103,7 +103,7 @@ def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
 def jobs_with_one_failed(successful_jobs):
     index_in_range = get_middle_index(successful_jobs)
     job_to_fail = successful_jobs[index_in_range]
-    job_to_fail.result = 'testfailed'
+    job_to_fail.result = "testfailed"
     job_to_fail.save()
@@ -111,7 +111,7 @@ def jobs_with_one_failed(successful_jobs):
 def jobs_with_one_pending(successful_jobs):
     index_in_range = get_middle_index(successful_jobs)
     job_pending = successful_jobs[index_in_range]
-    job_pending.result = 'unknown'
+    job_pending.result = "unknown"
     job_pending.save()
@@ -120,17 +120,17 @@ def jobs_with_one_pending_and_one_failed(successful_jobs):
     index_in_range = get_middle_index(successful_jobs)
     next_index_in_range = get_middle_index(successful_jobs) + 1
     job_pending = successful_jobs[index_in_range]
-    job_pending.result = 'unknown'
+    job_pending.result = "unknown"
     job_pending.save()
     job_to_fail = successful_jobs[next_index_in_range]
-    job_to_fail.result = 'testfailed'
+    job_to_fail.result = "testfailed"
     job_to_fail.save()


 @pytest.fixture
 def get_outcome_checker_mock():
     def get_outcome_checker_mock(outcome: OutcomeStatus):
-        return type('', (), {'check': lambda *params: outcome})
+        return type("", (), {"check": lambda *params: outcome})

     return get_outcome_checker_mock
@@ -184,8 +184,8 @@ def test_outcome_checker_identifies_pushes_in_range(
 ):
     total_pushes = Push.objects.count()

-    from_time = range_dates['from_date']
-    to_time = range_dates['to_date']
+    from_time = range_dates["from_date"]
+    to_time = range_dates["to_date"]

     total_outside_pushes = Push.objects.filter(
         Q(time__lt=from_time) | Q(time__gt=to_time), repository=test_repository
@@ -35,16 +35,16 @@ def test_record_job_symbol_is_none_if_component_misses(record_with_missing_job_s


 def test_record_correct_job_symbol(record_with_job_symbol):
-    expected_job_symbol = 'Btime[tier 2](Bogo)'
+    expected_job_symbol = "Btime[tier 2](Bogo)"
     assert record_with_job_symbol.job_symbol == expected_job_symbol


 @pytest.mark.parametrize(
-    'search_str_with, expected_search_str',
+    "search_str_with, expected_search_str",
     [
-        ('all_fields', 'win7,Browsertime performance tests on Firefox,Bogo tests,Bogo'),
-        ('no_job_group', 'win7,Bogo tests,Bogo'),
-        ('no_job_type', 'win7,Browsertime performance tests on Firefox'),
+        ("all_fields", "win7,Browsertime performance tests on Firefox,Bogo tests,Bogo"),
+        ("no_job_group", "win7,Bogo tests,Bogo"),
+        ("no_job_type", "win7,Browsertime performance tests on Firefox"),
     ],
 )
 def test_record_search_str(record_with_job_symbol, search_str_with, expected_search_str):
@@ -78,7 +78,7 @@ def test_records_change_to_ready_for_processing(
         backfill_tool_mock,
         secretary,
     )
-    sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+    sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])

     assert preliminary_records.count() == 1
     assert ready_records.count() == 1
@@ -123,7 +123,7 @@ def test_records_and_db_limits_remain_unchanged_if_no_records_suitable_for_backf
     record_unsuited_for_backfill,
 ):
     sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
-    sherlock._backfill(['test_talos'], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
+    sherlock._backfill(["test_talos"], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])

     assert not has_changed(record_unsuited_for_backfill)
     assert not has_changed(sherlock_settings)
@@ -137,7 +137,7 @@ def test_records_remain_unchanged_if_no_backfills_left(
     empty_sheriff_settings,
 ):
     sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
-    sherlock._backfill(['test_talos'], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
+    sherlock._backfill(["test_talos"], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])

     assert not has_changed(record_ready_for_processing)
@@ -152,7 +152,7 @@ def test_records_and_db_limits_remain_unchanged_if_runtime_exceeded(
     no_time_left = timedelta(seconds=0)
     sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
     try:
-        sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+        sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
     except MaxRuntimeExceeded:
         pass
@@ -170,11 +170,11 @@ def test_db_limits_update_if_backfills_left(
     targeted_platform = record_ready_for_processing.platform.platform

     initial_backfills = secretary.backfills_left(on_platform=targeted_platform)
-    assert initial_backfills == json.loads(sherlock_settings.settings)['limits'][targeted_platform]
+    assert initial_backfills == json.loads(sherlock_settings.settings)["limits"][targeted_platform]
     sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
     sherlock.sheriff(
         since=EPOCH,
-        frameworks=['test_talos'],
+        frameworks=["test_talos"],
         repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
     )
@@ -198,7 +198,7 @@ def test_backfilling_gracefully_handles_invalid_json_contexts_without_blowing_up
     try:
         sherlock.sheriff(
             since=EPOCH,
-            frameworks=['test_talos'],
+            frameworks=["test_talos"],
             repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
         )
     except (JSONDecodeError, KeyError, Job.DoesNotExist, Push.DoesNotExist):
@@ -5,8 +5,8 @@ from requests import Session

 from treeherder.perf.sheriffing_criteria import NonBlockableSession

-CASSETTE_LIBRARY_DIR = 'tests/sample_data/betamax_cassettes/perf_sheriffing_criteria'
-CASSETTES_RECORDING_DATE = 'June 2nd, 2020'  # when VCR has been conducted
+CASSETTE_LIBRARY_DIR = "tests/sample_data/betamax_cassettes/perf_sheriffing_criteria"
+CASSETTES_RECORDING_DATE = "June 2nd, 2020"  # when VCR has been conducted


 @pytest.fixture
@@ -30,12 +30,12 @@ def concrete_formula_classes() -> List[Type[BugzillaFormula]]:
     return [EngineerTractionFormula, FixRatioFormula]


-@pytest.mark.parametrize('formula', formula_instances())
+@pytest.mark.parametrize("formula", formula_instances())
 def test_formula_exposes_quantifying_period(formula, nonblock_session):
     assert formula.quantifying_period == settings.QUANTIFYING_PERIOD


-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
 def test_formula_exposes_oldest_timestamp(formula, nonblock_session):
     no_older_than = datetime.now() - timedelta(weeks=24, seconds=5)
@@ -48,9 +48,9 @@ def test_total_alerts_formula_exposes_oldest_timestamp():
     assert TotalAlertsFormula().oldest_timestamp >= no_older_than


-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
 @pytest.mark.parametrize(
-    'cooled_down_bug',
+    "cooled_down_bug",
     [
         {"creation_time": "2020-05-18T15:20:55Z"},  # older than 2 weeks
         {"creation_time": "2020-05-04T15:20:55Z"},  # older than 1 month
@@ -61,13 +61,13 @@ def test_formula_correctly_detects_cooled_down_bugs(cooled_down_bug, formula, no
     assert formula.has_cooled_down(cooled_down_bug)


-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
 @pytest.mark.parametrize(
-    'not_cooled_down_bug',
+    "not_cooled_down_bug",
     [
-        {'creation_time': '2020-05-31T00:00:00Z'},  # 2 days old
-        {'creation_time': '2020-05-26T00:00:00Z'},  # 1 week old
-        {'creation_time': '2020-05-19T23:00:00Z'},  # ~2 weeks old, except for 1 hour
+        {"creation_time": "2020-05-31T00:00:00Z"},  # 2 days old
+        {"creation_time": "2020-05-26T00:00:00Z"},  # 1 week old
+        {"creation_time": "2020-05-19T23:00:00Z"},  # ~2 weeks old, except for 1 hour
     ],
 )
 def test_formula_detects_bugs_that_didnt_cool_down_yet(
@@ -76,14 +76,14 @@ def test_formula_detects_bugs_that_didnt_cool_down_yet(
     assert not formula.has_cooled_down(not_cooled_down_bug)


-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
-@pytest.mark.parametrize('bad_structured_bug', [{}, {'creation_time': 'jiberish-date'}])
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
+@pytest.mark.parametrize("bad_structured_bug", [{}, {"creation_time": "jiberish-date"}])
 def test_formula_throws_adequate_error_for_bug(bad_structured_bug, formula, nonblock_session):
     with pytest.raises(ValueError):
         formula.has_cooled_down(bad_structured_bug)


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_formula_initializes_with_non_blockable_sessions(FormulaClass, nonblock_session):
     try:
         _ = FormulaClass(nonblock_session)
@@ -96,13 +96,13 @@ def test_formula_initializes_with_non_blockable_sessions(FormulaClass, nonblock_
         pytest.fail()


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_formula_cannot_be_initialized_with_a_regular_session(FormulaClass, unrecommended_session):
     with pytest.raises(TypeError):
         _ = FormulaClass(unrecommended_session)


-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
 def test_accessing_breakdown_without_prior_calculus_errors_out(formula, nonblock_session):
     with pytest.raises(RuntimeError):
         _ = formula.breakdown()
@@ -111,61 +111,61 @@ def test_accessing_breakdown_without_prior_calculus_errors_out(formula, nonblock
 # Leveraging HTTP VCR


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)

     with pytest.raises(TypeError):
-        formula('some_framework')
+        formula("some_framework")

     with pytest.raises(TypeError):
         formula()

-    with betamax_recorder.use_cassette('awsy-JS', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette("awsy-JS", serialize_with="prettyjson"):
         try:
-            formula('awsy', 'JS')
+            formula("awsy", "JS")
         except TypeError:
             pytest.fail()


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)

-    test_moniker_A = ('build_metrics', 'build times')
-    test_moniker_B = ('talos', 'tp5n', 'nonmain_startup_fileio')
+    test_moniker_A = ("build_metrics", "build times")
+    test_moniker_B = ("talos", "tp5n", "nonmain_startup_fileio")

-    cassette_preffix_A = '-'.join(filter(None, test_moniker_A))
-    cassette_preffix_B = '-'.join(filter(None, test_moniker_B))
+    cassette_preffix_A = "-".join(filter(None, test_moniker_A))
+    cassette_preffix_B = "-".join(filter(None, test_moniker_B))

-    with betamax_recorder.use_cassette(f'{cassette_preffix_A}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
         formula(*test_moniker_A)  # let it perform calculus & cache breakdown
         breakdown_A = formula.breakdown()

-    with betamax_recorder.use_cassette(f'{cassette_preffix_B}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
         formula(*test_moniker_B)  # let it perform calculus & cache breakdown
         breakdown_B = formula.breakdown()

     assert breakdown_A != breakdown_B


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)

-    test_moniker_A = ('build_metrics', 'build times')
-    test_moniker_B = ('nonexistent_framework', 'nonexistent_suite')
+    test_moniker_A = ("build_metrics", "build times")
+    test_moniker_B = ("nonexistent_framework", "nonexistent_suite")

-    cassette_preffix_A = '-'.join(filter(None, test_moniker_A))
-    cassette_preffix_B = '-'.join(filter(None, test_moniker_B))
+    cassette_preffix_A = "-".join(filter(None, test_moniker_A))
+    cassette_preffix_B = "-".join(filter(None, test_moniker_B))

     # run happy path calculus
-    with betamax_recorder.use_cassette(f'{cassette_preffix_A}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
         formula(*test_moniker_A)  # let it perform calculus & cache breakdown
         _ = formula.breakdown()

     # now run alternated path calculus
-    with betamax_recorder.use_cassette(f'{cassette_preffix_B}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
         with pytest.raises(NoFiledBugs):
             formula(*test_moniker_B)  # intentionally blows up while doing calculus
@@ -174,50 +174,50 @@ def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax
         _ = formula.breakdown()


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 @pytest.mark.parametrize(
-    'framework, suite, test',
+    "framework, suite, test",
     [
-        ('build_metrics', 'build times', None),
-        ('build_metrics', 'installer size', None),
-        ('awsy', 'JS', None),
-        ('talos', 'tp5n', 'nonmain_startup_fileio'),
+        ("build_metrics", "build times", None),
+        ("build_metrics", "installer size", None),
+        ("awsy", "JS", None),
+        ("talos", "tp5n", "nonmain_startup_fileio"),
     ],
 )
 def test_formula_fetches_bugs_from_quantifying_period(
     framework, suite, test, FormulaClass, betamax_recorder
 ):
     formula = FormulaClass(betamax_recorder.session)
-    cassette = '-'.join(filter(None, [framework, suite, test]))
+    cassette = "-".join(filter(None, [framework, suite, test]))

-    with betamax_recorder.use_cassette(f'{cassette}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
         formula(framework, suite, test)  # let it perform calculus & cache breakdown

     all_filed_bugs, except_new_bugs = formula.breakdown()

     assert len(all_filed_bugs) > 0
     for bug in all_filed_bugs:
-        creation_time = datetime.strptime(bug['creation_time'], BZ_DATETIME_FORMAT)
+        creation_time = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
         assert creation_time >= formula.oldest_timestamp


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 @pytest.mark.parametrize(
-    'framework, suite, test',
+    "framework, suite, test",
     [
-        ('build_metrics', 'build times', None),
-        ('build_metrics', 'installer size', None),
-        ('awsy', 'JS', None),
-        ('talos', 'tp5n', 'nonmain_startup_fileio'),
+        ("build_metrics", "build times", None),
+        ("build_metrics", "installer size", None),
+        ("awsy", "JS", None),
+        ("talos", "tp5n", "nonmain_startup_fileio"),
     ],
 )
 def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
     framework, suite, test, FormulaClass, betamax_recorder
 ):
     formula = FormulaClass(betamax_recorder.session)
-    cassette = '-'.join(filter(None, [framework, suite, test]))
+    cassette = "-".join(filter(None, [framework, suite, test]))

-    with betamax_recorder.use_cassette(f'{cassette}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
         formula(framework, suite, test)  # let it perform calculus & cache breakdown

     # left with cooled down bugs only
@@ -226,14 +226,14 @@ def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
         assert formula.has_cooled_down(bug)


-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_formula_errors_up_when_no_bugs_were_filed(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)
-    nonexistent_framework = 'nonexistent_framework'
-    nonexistent_suite = 'nonexistent_suite'
+    nonexistent_framework = "nonexistent_framework"
+    nonexistent_suite = "nonexistent_suite"

     with betamax_recorder.use_cassette(
-        f'{nonexistent_framework}-{nonexistent_suite}', serialize_with='prettyjson'
+        f"{nonexistent_framework}-{nonexistent_suite}", serialize_with="prettyjson"
     ):
         with pytest.raises(NoFiledBugs):
             formula(nonexistent_framework, nonexistent_suite)
@@ -26,28 +26,28 @@ from treeherder.utils import PROJECT_ROOT

 pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]

-RECORD_TEST_PATH = (PROJECT_ROOT / 'tests/sample_data/criteria-records.csv').resolve()
+RECORD_TEST_PATH = (PROJECT_ROOT / "tests/sample_data/criteria-records.csv").resolve()
 EXPECTED_LAST_UPDATE = dateutil_parse(CASSETTES_RECORDING_DATE)
 EXPECTED_VALUE = 0.5
 TESTS_WITH_NO_DATA = [
-    ('awsy', 'Base Content Explicit', ''),
-    ('browsertime', 'allrecipes-cold', ''),
-    ('raptor', 'os-baseline-power', ''),
-    ('talos', 'a11yr', ''),
+    ("awsy", "Base Content Explicit", ""),
+    ("browsertime", "allrecipes-cold", ""),
+    ("raptor", "os-baseline-power", ""),
+    ("talos", "a11yr", ""),
 ]
 TESTS_WITH_EXPIRED_DATA = [
-    ('awsy', 'Base Content Heap Unclassified', ''),
-    ('browsertime', 'amazon', ''),
-    ('build_metrics', 'compiler warnings', ''),
-    ('raptor', 'raptor-ares6-firefox', ''),
-    ('talos', 'about_newtab_with_snippets', ''),
+    ("awsy", "Base Content Heap Unclassified", ""),
+    ("browsertime", "amazon", ""),
+    ("build_metrics", "compiler warnings", ""),
+    ("raptor", "raptor-ares6-firefox", ""),
+    ("talos", "about_newtab_with_snippets", ""),
 ]
 TESTS_WITH_UPDATED_DATA = [
-    ('awsy', 'Base Content JS', ''),
-    ('browsertime', 'amazon-cold', ''),
-    ('build_metrics', 'installer size', ''),
-    ('raptor', 'raptor-assorted-dom-firefox', ''),
-    ('talos', 'about_preferences_basic', ''),
+    ("awsy", "Base Content JS", ""),
+    ("browsertime", "amazon-cold", ""),
+    ("build_metrics", "installer size", ""),
+    ("raptor", "raptor-assorted-dom-firefox", ""),
+    ("talos", "about_preferences_basic", ""),
 ]
 recording_date = dateutil_parse(CASSETTES_RECORDING_DATE).isoformat()
 RECORDS_WITH_NO_DATA = [
@@ -55,11 +55,11 @@ RECORDS_WITH_NO_DATA = [
         Framework=test[0],
         Suite=test[1],
         Test=test[2],
-        EngineerTraction='',
-        FixRatio='',
-        TotalAlerts='',
-        LastUpdatedOn='',
-        AllowSync='',
+        EngineerTraction="",
+        FixRatio="",
+        TotalAlerts="",
+        LastUpdatedOn="",
+        AllowSync="",
     )
     for test in TESTS_WITH_NO_DATA
 ]
@@ -71,8 +71,8 @@ RECORDS_WITH_EXPIRED_DATA = [
         EngineerTraction=0.5,
         FixRatio=0.3,
         TotalAlerts=21,
-        LastUpdatedOn='2020-05-02T00:00:00.000000',
-        AllowSync='',
+        LastUpdatedOn="2020-05-02T00:00:00.000000",
+        AllowSync="",
     )
     for test in TESTS_WITH_EXPIRED_DATA
 ]
@@ -84,8 +84,8 @@ RECORDS_WITH_UPDATED_DATA = [
         EngineerTraction=0.5,
         FixRatio=0.3,
         TotalAlerts=21,
-        LastUpdatedOn='2020-06-02T00:00:00.000000',
-        AllowSync='',
+        LastUpdatedOn="2020-06-02T00:00:00.000000",
+        AllowSync="",
     )
     for test in TESTS_WITH_UPDATED_DATA
 ]
@@ -114,7 +114,7 @@ for res in READY_RESULTS:

 class eventually_ready:
     def __init__(self, start_time: float, ready_after: float):
-        print(f'start_time: {start_time}')
+        print(f"start_time: {start_time}")
         self.start_time = start_time
         self.ready_after = ready_after
@@ -151,7 +151,7 @@ def should_take_more_than(seconds: float):
 @pytest.fixture
 def updatable_criteria_csv(tmp_path):
     updatable_csv = tmp_path / "updatable-criteria.csv"
-    with open(RECORD_TEST_PATH, 'r') as file_:
+    with open(RECORD_TEST_PATH, "r") as file_:
         updatable_csv.write_text(file_.read())

     return updatable_csv
@@ -160,17 +160,17 @@ def updatable_criteria_csv(tmp_path):
 @pytest.fixture
 def mock_formula_map():
     return {
-        'EngineerTraction': MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
-        'FixRatio': MagicMock(spec=FixRatioFormula, return_value=EXPECTED_VALUE),
-        'TotalAlerts': MagicMock(spec=FixRatioFormula, return_value=0),
+        "EngineerTraction": MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
+        "FixRatio": MagicMock(spec=FixRatioFormula, return_value=EXPECTED_VALUE),
+        "TotalAlerts": MagicMock(spec=FixRatioFormula, return_value=0),
     }


 @pytest.mark.parametrize(
-    'invalid_formulas',
+    "invalid_formulas",
     [
-        {'EngineerTraction': InvalidFormula(), 'FixRatio': InvalidFormula()},
-        {'EngineerTraction': None, 'FixRatio': None},
+        {"EngineerTraction": InvalidFormula(), "FixRatio": InvalidFormula()},
+        {"EngineerTraction": None, "FixRatio": None},
     ],
 )
 def test_tracker_throws_error_for_invalid_formulas(invalid_formulas):
@@ -179,7 +179,7 @@ def test_tracker_throws_error_for_invalid_formulas(invalid_formulas):


 def test_tracker_throws_error_if_no_record_file_found(tmp_path):
-    nonexistent_file = str(tmp_path / 'perf-sheriffing-criteria.csv')
+    nonexistent_file = str(tmp_path / "perf-sheriffing-criteria.csv")
     tracker = CriteriaTracker(record_path=nonexistent_file)

     with pytest.raises(FileNotFoundError):
@@ -194,28 +194,28 @@ def test_tracker_has_a_list_of_records():
     assert len(record_list) == 5


-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_NO_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_NO_DATA)
 def test_record_computer_can_tell_missing_data(criteria_record):
     computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))

     assert computer.should_update(criteria_record)


-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_EXPIRED_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_EXPIRED_DATA)
 def test_record_computer_can_tell_expired_data(criteria_record):
     computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))

     assert computer.should_update(criteria_record)


-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_UPDATED_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_UPDATED_DATA)
 def test_record_computer_can_tell_updated_data(criteria_record):
     computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))

     assert not computer.should_update(criteria_record)


-@pytest.mark.parametrize('criteria_record', RECORDS_UNALLOWED_TO_SYNC)
+@pytest.mark.parametrize("criteria_record", RECORDS_UNALLOWED_TO_SYNC)
 def test_record_computer_can_tell_unallowed_data(criteria_record):
     computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))
@@ -223,31 +223,31 @@ def test_record_computer_can_tell_unallowed_data(criteria_record):


 @pytest.mark.freeze_time(CASSETTES_RECORDING_DATE)  # disable tick
-@pytest.mark.parametrize('exception', [NoFiledBugs(), Exception()])
+@pytest.mark.parametrize("exception", [NoFiledBugs(), Exception()])
 def test_record_computer_still_updates_if_one_of_the_formulas_fails(exception, db):
     formula_map = {
-        'EngineerTraction': MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
-        'FixRatio': MagicMock(spec=FixRatioFormula, side_effect=exception),
-        'TotalAlerts': TotalAlertsFormula(),
+        "EngineerTraction": MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
+        "FixRatio": MagicMock(spec=FixRatioFormula, side_effect=exception),
+        "TotalAlerts": TotalAlertsFormula(),
     }
     record = CriteriaRecord(
-        Framework='talos',
-        Suite='tp5n',
-        Test='',
-        EngineerTraction='',
-        FixRatio='',
-        TotalAlerts='',
-        LastUpdatedOn='',
-        AllowSync='',
+        Framework="talos",
+        Suite="tp5n",
+        Test="",
+        EngineerTraction="",
+        FixRatio="",
+        TotalAlerts="",
+        LastUpdatedOn="",
+        AllowSync="",
     )

     computer = RecordComputer(formula_map, timedelta(days=3), timedelta(seconds=0))
     record = computer.apply_formulas(record)

-    assert record.Framework == 'talos'
-    assert record.Suite == 'tp5n'
+    assert record.Framework == "talos"
+    assert record.Suite == "tp5n"
     assert record.EngineerTraction == EXPECTED_VALUE
-    assert record.FixRatio == 'N/A'
+    assert record.FixRatio == "N/A"
     assert record.TotalAlerts == 0  # as the test database is empty
     assert record.LastUpdatedOn == EXPECTED_LAST_UPDATE
     assert record.AllowSync is True
@@ -277,10 +277,10 @@ def test_tracker_updates_records_with_missing_data(mock_formula_map, updatable_c

     # CSV has no criteria data initially
     for criteria_rec in tracker:
-        assert criteria_rec.EngineerTraction == ''
-        assert criteria_rec.FixRatio == ''
-        assert criteria_rec.TotalAlerts == ''
-        assert criteria_rec.LastUpdatedOn == ''
+        assert criteria_rec.EngineerTraction == ""
+        assert criteria_rec.FixRatio == ""
+        assert criteria_rec.TotalAlerts == ""
+        assert criteria_rec.LastUpdatedOn == ""
         assert criteria_rec.AllowSync is True

     tracker.update_records()
@@ -301,7 +301,7 @@ def test_tracker_updates_records_with_missing_data(mock_formula_map, updatable_c


 @pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, auto_tick_seconds=30)
-@pytest.mark.parametrize('async_results', [NEVER_READY_RESULTS, PARTIALLY_READY_RESULTS])
+@pytest.mark.parametrize("async_results", [NEVER_READY_RESULTS, PARTIALLY_READY_RESULTS])
 def test_results_checker_timeouts_on_no_changes(async_results):
     checker = ResultsChecker(check_interval=timedelta(0.0), timeout_after=timedelta(minutes=5))
@@ -310,7 +310,7 @@ def test_results_checker_timeouts_on_no_changes(async_results):


 @pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, auto_tick_seconds=30)
-@pytest.mark.parametrize('async_results', [READY_RESULTS, EVENTUALLY_READY_RESULTS])
+@pytest.mark.parametrize("async_results", [READY_RESULTS, EVENTUALLY_READY_RESULTS])
 def test_results_checker_doesnt_timeout_unexpectedly(async_results):
     checker = ResultsChecker(check_interval=timedelta(0.0), timeout_after=timedelta(minutes=5))
@@ -24,30 +24,30 @@ pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]
 @pytest.fixture
 def quantified_bugs(betamax_recorder) -> list:
     params = {
-        'longdesc': 'raptor speedometer',
-        'longdesc_type': 'allwords',
-        'longdesc_initial': 1,
-        'keywords': 'perf,perf-alert',
-        'keywords_type': 'anywords',
-        'creation_time': '2019-12-17',
-        'query_format': 'advanced',
+        "longdesc": "raptor speedometer",
+        "longdesc_type": "allwords",
+        "longdesc_initial": 1,
+        "keywords": "perf,perf-alert",
+        "keywords_type": "anywords",
+        "creation_time": "2019-12-17",
+        "query_format": "advanced",
     }

-    with betamax_recorder.use_cassette('quantified-bugs', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette("quantified-bugs", serialize_with="prettyjson"):
         bug_resp = betamax_recorder.session.get(
-            'https://bugzilla.mozilla.org/rest/bug',
-            headers={'Accept': 'application/json'},
+            "https://bugzilla.mozilla.org/rest/bug",
+            headers={"Accept": "application/json"},
             params=params,
             timeout=60,
         )
-        return bug_resp.json()['bugs']
+        return bug_resp.json()["bugs"]


 @pytest.fixture
 def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
     bugs = []
     for bug in quantified_bugs:
-        created_at = datetime.strptime(bug['creation_time'], BZ_DATETIME_FORMAT)
+        created_at = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
         if created_at <= datetime.now() - timedelta(weeks=2):
             bugs.append(bug)
     return bugs
@@ -59,39 +59,39 @@ def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
 def test_formula_counts_tracted_bugs(cooled_down_bugs, betamax_recorder):
     engineer_traction = EngineerTractionFormula(betamax_recorder.session)

-    with betamax_recorder.use_cassette('cooled-down-bug-history', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette("cooled-down-bug-history", serialize_with="prettyjson"):
         tracted_bugs = engineer_traction._filter_numerator_bugs(cooled_down_bugs)
         assert len(tracted_bugs) == 2


 @pytest.mark.parametrize(
-    'framework, suite, test',
+    "framework, suite, test",
     [
         # Sheriffed tests
-        ('build_metrics', 'build times', None),  # 92%
-        ('build_metrics', 'installer size', None),  # 78%
-        ('awsy', 'JS', None),  # 55%
-        ('talos', 'tp5n', 'main_startup_fileio'),  # 50%
+        ("build_metrics", "build times", None),  # 92%
+        ("build_metrics", "installer size", None),  # 78%
+        ("awsy", "JS", None),  # 55%
+        ("talos", "tp5n", "main_startup_fileio"),  # 50%
     ],
 )
 def test_final_formula_confirms_sheriffed_tests(framework, suite, test, betamax_recorder):
     engineer_traction = EngineerTractionFormula(betamax_recorder.session)

-    with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
         assert engineer_traction(framework, suite) >= 0.35


 @pytest.mark.parametrize(
-    'framework, suite, test',
+    "framework, suite, test",
     [
         # Non-sheriffed tests
-        ('raptor', 'raptor-speedometer-firefox', None),  # 33%
-        ('raptor', 'raptor-webaudio-firefox', None),  # 0%
-        ('raptor', 'raptor-tp6-google-mail-firefox-cold', 'replayed'),  # 0%
+        ("raptor", "raptor-speedometer-firefox", None),  # 33%
+        ("raptor", "raptor-webaudio-firefox", None),  # 0%
+        ("raptor", "raptor-tp6-google-mail-firefox-cold", "replayed"),  # 0%
     ],
 )
 def test_final_formula_confirms_non_sheriffed_tests(framework, suite, test, betamax_recorder):
     engineer_traction = EngineerTractionFormula(betamax_recorder.session)

-    with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
         assert engineer_traction(framework, suite, test) < 0.35
@@ -15,32 +15,32 @@ pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]


 @pytest.mark.parametrize(
-    'framework, suite',
+    "framework, suite",
     [
         # Sheriffed tests
-        ('build_metrics', 'build times'),  # 37.5%
-        ('build_metrics', 'installer size'),  # 41.6%
-        ('raptor', 'raptor-speedometer-firefox'),  # 100%
-        ('raptor', 'raptor-webaudio-firefox'),  # 100%
+        ("build_metrics", "build times"),  # 37.5%
+        ("build_metrics", "installer size"),  # 41.6%
+        ("raptor", "raptor-speedometer-firefox"),  # 100%
+        ("raptor", "raptor-webaudio-firefox"),  # 100%
     ],
 )
 def test_formula_confirms_sheriffed_tests(framework, suite, betamax_recorder):
     fix_ratio = FixRatioFormula(betamax_recorder.session)

-    with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
         assert fix_ratio(framework, suite) >= 0.3


 @pytest.mark.parametrize(
-    'framework, suite, test',
+    "framework, suite, test",
     [
         # Non-sheriffed tests
-        ('awsy', 'JS', None),  # 20%
-        ('talos', 'tp5n', 'nonmain_startup_fileio'),  # 0%
+        ("awsy", "JS", None),  # 20%
+        ("talos", "tp5n", "nonmain_startup_fileio"),  # 0%
     ],
 )
 def test_formula_confirms_non_sheriffed_tests(framework, suite, test, betamax_recorder):
     fix_ratio = FixRatioFormula(betamax_recorder.session)

-    with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+    with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
         assert fix_ratio(framework, suite, test) < 0.3
@@ -5,7 +5,7 @@ def test_nonblockable_sessions_has_the_recommended_headers(nonblock_session):
     session_headers = nonblock_session.headers

     try:
-        assert session_headers['Referer']
-        assert session_headers['User-Agent']
+        assert session_headers["Referer"]
+        assert session_headers["User-Agent"]
     except KeyError:
         pytest.fail()
@@ -43,5 +43,5 @@ class TestDeletionNotificationWriter:
             application=test_perf_signature.application,
             last_updated=test_perf_signature.last_updated.date(),
         )
-        expected_content += '\n'
+        expected_content += "\n"
         return expected_content
@@ -1,3 +1,3 @@
 from tests.conftest import SampleDataJSONLoader

-load_json_fixture = SampleDataJSONLoader('sherlock')
+load_json_fixture = SampleDataJSONLoader("sherlock")
@@ -39,12 +39,12 @@ def test_summary_status(
     signature1 = test_perf_signature
     signature2 = PerformanceSignature.objects.create(
         repository=test_repository,
-        signature_hash=(40 * 'u'),
+        signature_hash=(40 * "u"),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
-        suite='mysuite_2',
-        test='mytest_2',
+        suite="mysuite_2",
+        test="mytest_2",
         has_subtests=False,
         last_updated=datetime.datetime.now(),
     )
@@ -81,12 +81,12 @@ def test_reassigning_regression(
     signature1 = test_perf_signature
     signature2 = PerformanceSignature.objects.create(
         repository=test_repository,
-        signature_hash=(40 * 'u'),
+        signature_hash=(40 * "u"),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
-        suite='mysuite_2',
-        test='mytest_2',
+        suite="mysuite_2",
+        test="mytest_2",
         has_subtests=False,
         last_updated=datetime.datetime.now(),
     )
@@ -132,12 +132,12 @@ def test_improvement_summary_status_after_reassigning_regression(
     signature1 = test_perf_signature
     signature2 = PerformanceSignature.objects.create(
         repository=test_repository,
-        signature_hash=(40 * 'u'),
+        signature_hash=(40 * "u"),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
-        suite='mysuite_2',
-        test='mytest_2',
+        suite="mysuite_2",
+        test="mytest_2",
         has_subtests=False,
         last_updated=datetime.datetime.now(),
     )
@@ -59,10 +59,10 @@ def _generate_performance_data(
 ):
     push, _ = Push.objects.get_or_create(
         repository=test_repository,
-        revision='1234abcd%s' % t,
+        revision="1234abcd%s" % t,
         defaults={
-            'author': 'foo@bar.com',
-            'time': datetime.datetime.fromtimestamp(base_timestamp + t),
+            "author": "foo@bar.com",
+            "time": datetime.datetime.fromtimestamp(base_timestamp + t),
         },
     )
     PerformanceDatum.objects.create(
@@ -302,7 +302,7 @@ def test_custom_alert_threshold(
     assert PerformanceAlertSummary.objects.count() == 1


-@pytest.mark.parametrize(('new_value', 'expected_num_alerts'), [(1.0, 1), (0.25, 0)])
+@pytest.mark.parametrize(("new_value", "expected_num_alerts"), [(1.0, 1), (0.25, 0)])
 def test_alert_change_type_absolute(
     test_repository,
     test_issue_tracker,
@@ -47,7 +47,7 @@ def test_weights():
     [
         ([0.0, 0.0], [1.0, 2.0], 3.0),
         ([0.0, 0.0], [0.0, 0.0], 0.0),
-        ([0.0, 0.0], [1.0, 1.0], float('inf')),
+        ([0.0, 0.0], [1.0, 1.0], float("inf")),
     ],
 )
 def test_calc_t(old_data, new_data, expected):
@@ -111,13 +111,13 @@ def test_detect_changes_few_revisions_many_values():
 @pytest.mark.parametrize(
     ("filename", "expected_timestamps"),
     [
-        ('runs1.json', [1365019665]),
-        ('runs2.json', [1357704596, 1358971894, 1365014104]),
-        ('runs3.json', [1335293827, 1338839958]),
-        ('runs4.json', [1364922838]),
-        ('runs5.json', []),
-        ('a11y.json', [1366197637, 1367799757]),
-        ('tp5rss.json', [1372846906, 1373413365, 1373424974]),
+        ("runs1.json", [1365019665]),
+        ("runs2.json", [1357704596, 1358971894, 1365014104]),
+        ("runs3.json", [1335293827, 1338839958]),
+        ("runs4.json", [1364922838]),
+        ("runs5.json", []),
+        ("a11y.json", [1366197637, 1367799757]),
+        ("tp5rss.json", [1372846906, 1373413365, 1373424974]),
     ],
 )
 def test_detect_changes_historical_data(filename, expected_timestamps):
@@ -128,8 +128,8 @@ def test_detect_changes_historical_data(filename, expected_timestamps):
     MAX_BACK_WINDOW = 24
     THRESHOLD = 7

-    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
-    runs = payload['test_runs']
+    payload = SampleData.get_perf_data(os.path.join("graphs", filename))
+    runs = payload["test_runs"]
     data = [RevisionDatum(r[2], r[2], [r[3]]) for r in runs]

     results = detect_changes(
@@ -8,14 +8,14 @@ def test_get_build_failures(
     jobs = sample_data.job_data[20:25]

     for blob in jobs:
-        blob['revision'] = test_push.revision
-        blob['job']['result'] = 'busted'
-        blob['job']['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXLs'
-        blob['job']['taskcluster_retry_id'] = '0'
+        blob["revision"] = test_push.revision
+        blob["job"]["result"] = "busted"
+        blob["job"]["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXLs"
+        blob["job"]["taskcluster_retry_id"] = "0"
     store_job_data(test_repository, jobs)

     result, build_failures, in_progress = get_build_failures(test_push)

     assert in_progress == 0
-    assert result == 'fail'
+    assert result == "fail"
     assert len(build_failures) == 2
@@ -7,45 +7,45 @@ def test_intermittent_win7_reftest():
     """test that a failed test is classified as infra"""
     failures = [
         {
-            'testName': 'foo',
-            'jobName': 'Foodebug-reftest',
-            'platform': 'windows7-32',
-            'suggestedClassification': 'New Failure',
-            'config': 'foo',
-            'isClassifiedIntermittent': True,
+            "testName": "foo",
+            "jobName": "Foodebug-reftest",
+            "platform": "windows7-32",
+            "suggestedClassification": "New Failure",
+            "config": "foo",
+            "isClassifiedIntermittent": True,
         }
     ]
     set_classifications(failures, {}, {})

-    assert failures[0]['suggestedClassification'] == 'intermittent'
+    assert failures[0]["suggestedClassification"] == "intermittent"


 @pytest.mark.parametrize(
-    ('history', 'confidence', 'classification', 'fcid'),
+    ("history", "confidence", "classification", "fcid"),
     [
-        ({'foo': {'bing': {'baz': 2}}}, 100, 'intermittent', 1),
-        ({'foo': {'bing': {'bee': 2}}}, 75, 'intermittent', 1),
-        ({'foo': {'bee': {'bee': 2}}}, 50, 'intermittent', 1),
-        ({'fee': {'bee': {'bee': 2}}}, 0, 'New Failure', 1),
+        ({"foo": {"bing": {"baz": 2}}}, 100, "intermittent", 1),
+        ({"foo": {"bing": {"bee": 2}}}, 75, "intermittent", 1),
+        ({"foo": {"bee": {"bee": 2}}}, 50, "intermittent", 1),
+        ({"fee": {"bee": {"bee": 2}}}, 0, "New Failure", 1),
         # no match, but job has been classified as intermittent by hand.
-        ({'fee': {'bee': {'bee': 2}}}, 100, 'intermittent', 4),
+        ({"fee": {"bee": {"bee": 2}}}, 100, "intermittent", 4),
     ],
 )
 def test_intermittent_confidence(history, confidence, classification, fcid):
     """test that a failed test is classified as intermittent, confidence 100"""
     failures = [
         {
-            'testName': 'foo',
-            'jobName': 'bar',
-            'platform': 'bing',
-            'suggestedClassification': 'New Failure',
-            'config': 'baz',
-            'confidence': 0,
-            'isClassifiedIntermittent': fcid == 4,
+            "testName": "foo",
+            "jobName": "bar",
+            "platform": "bing",
+            "suggestedClassification": "New Failure",
+            "config": "baz",
+            "confidence": 0,
+            "isClassifiedIntermittent": fcid == 4,
         }
     ]

     set_classifications(failures, history, {})

-    assert failures[0]['suggestedClassification'] == classification
-    assert failures[0]['confidence'] == confidence
+    assert failures[0]["suggestedClassification"] == classification
+    assert failures[0]["confidence"] == confidence
|
|||
from treeherder.model.models import Push
|
||||
from treeherder.push_health.compare import get_commit_history
|
||||
|
||||
test_revision = '4c45a777949168d16c03a4cba167678b7ab65f76'
|
||||
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
|
||||
test_revision = "4c45a777949168d16c03a4cba167678b7ab65f76"
|
||||
parent_revision = "abcdef77949168d16c03a4cba167678b7ab65f76"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
@ -14,7 +14,7 @@ def mock_rev(test_push):
|
|||
# This is the revision/push under test
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'https://hg.mozilla.org/{test_push.repository.name}/rev/{test_revision}?style=json',
|
||||
f"https://hg.mozilla.org/{test_push.repository.name}/rev/{test_revision}?style=json",
|
||||
json={
|
||||
"node": test_revision,
|
||||
"date": [1589318819.0, -7200],
|
||||
|
@ -26,7 +26,7 @@ def mock_rev(test_push):
|
|||
"pushdate": [1589318855, 0],
|
||||
"pushuser": "hiro@protagonist.com",
|
||||
},
|
||||
content_type='application/json',
|
||||
content_type="application/json",
|
||||
status=200,
|
||||
)
|
||||
|
||||
|
@ -35,7 +35,7 @@ def mock_rev(test_push):
|
|||
def mock_json_pushes(test_push):
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'https://hg.mozilla.org/{test_push.repository.name}/json-pushes?version=2&full=1&startID=536015&endID=536016',
|
||||
f"https://hg.mozilla.org/{test_push.repository.name}/json-pushes?version=2&full=1&startID=536015&endID=536016",
|
||||
json={
|
||||
"pushes": {
|
||||
"536016": {
|
||||
|
@ -49,12 +49,12 @@ def mock_json_pushes(test_push):
|
|||
}
|
||||
},
|
||||
},
|
||||
content_type='application/json',
|
||||
content_type="application/json",
|
||||
status=200,
|
||||
)
|
||||
responses.add(
|
||||
responses.GET,
|
||||
f'https://hg.mozilla.org/{test_push.repository.name}/json-automationrelevance/4c45a777949168d16c03a4cba167678b7ab65f76?backouts=1',
|
||||
f"https://hg.mozilla.org/{test_push.repository.name}/json-automationrelevance/4c45a777949168d16c03a4cba167678b7ab65f76?backouts=1",
|
||||
json={
|
||||
"changesets": [
|
||||
{
|
||||
|
@ -90,7 +90,7 @@ def mock_json_pushes(test_push):
|
|||
},
|
||||
],
|
||||
},
|
||||
content_type='application/json',
|
||||
content_type="application/json",
|
||||
status=200,
|
||||
)
|
||||
|
||||
|
@ -100,12 +100,12 @@ def test_get_commit_history(test_push, test_repository, mock_rev, mock_json_push
|
|||
Push.objects.create(
|
||||
revision=parent_revision,
|
||||
repository=test_repository,
|
||||
author='foo@bar.baz',
|
||||
author="foo@bar.baz",
|
||||
time=datetime.datetime.now(),
|
||||
)
|
||||
|
||||
history = get_commit_history(test_repository, test_revision, test_push)
|
||||
print('\n<><><>history')
|
||||
print("\n<><><>history")
|
||||
print(history)
|
||||
assert history['parentSha'] == parent_revision
|
||||
assert history['parentRepository']['name'] == test_repository.name
|
||||
assert history["parentSha"] == parent_revision
|
||||
assert history["parentRepository"]["name"] == test_repository.name
|
||||
|
|
|
@@ -8,19 +8,19 @@ def test_get_linting_failures(
     jobs = sample_data.job_data[20:22]

     for blob in jobs:
-        blob['revision'] = test_push.revision
-        blob['job'].update(
+        blob["revision"] = test_push.revision
+        blob["job"].update(
             {
-                'result': 'testfailed',
-                'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
-                'taskcluster_retry_id': '0',
+                "result": "testfailed",
+                "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs",
+                "taskcluster_retry_id": "0",
             }
         )
-        blob['job']['machine_platform']['platform'] = 'lint'
+        blob["job"]["machine_platform"]["platform"] = "lint"
     store_job_data(test_repository, jobs)

     result, build_failures, in_progress = get_lint_failures(test_push)

     assert in_progress == 0
-    assert result == 'fail'
+    assert result == "fail"
     assert len(build_failures) == 2
@@ -4,31 +4,31 @@ from treeherder.model.models import FailureLine, Job, Repository
 from treeherder.push_health.tests import get_test_failures, get_test_failure_jobs, has_job, has_line


-@pytest.mark.parametrize(('find_it',), [(True,), (False,)])
+@pytest.mark.parametrize(("find_it",), [(True,), (False,)])
 def test_has_job(find_it):
-    job = Job(id=123, repository=Repository(), guid='12345')
+    job = Job(id=123, repository=Repository(), guid="12345")
     job_list = [
-        {'id': 111},
-        {'id': 222},
+        {"id": 111},
+        {"id": 222},
     ]

     if find_it:
-        job_list.append({'id': 123})
+        job_list.append({"id": 123})
         assert has_job(job, job_list)
     else:
         assert not has_job(job, job_list)


-@pytest.mark.parametrize(('find_it',), [(True,), (False,)])
+@pytest.mark.parametrize(("find_it",), [(True,), (False,)])
 def test_has_line(find_it):
     line = FailureLine(line=123)
     line_list = [
-        {'line_number': 111},
-        {'line_number': 222},
+        {"line_number": 111},
+        {"line_number": 222},
     ]

     if find_it:
-        line_list.append({'line_number': 123})
+        line_list.append({"line_number": 123})
         assert has_line(line, line_list)
     else:
         assert not has_line(line, line_list)
@@ -37,13 +37,13 @@ def test_has_line(find_it):
 def test_get_test_failures(
     failure_classifications, test_repository, test_job, text_log_error_lines
 ):
-    test_job.result = 'testfailed'
+    test_job.result = "testfailed"
     test_job.save()

     result_status, jobs = get_test_failure_jobs(test_job.push)
     result, build_failures = get_test_failures(test_job.push, jobs, result_status)
-    need_investigation = build_failures['needInvestigation']
+    need_investigation = build_failures["needInvestigation"]

-    assert result == 'fail'
+    assert result == "fail"
     assert len(need_investigation) == 1
-    assert len(jobs[need_investigation[0]['jobName']]) == 1
+    assert len(jobs[need_investigation[0]["jobName"]]) == 1
@ -12,49 +12,49 @@ from treeherder.push_health.usage import get_latest, get_peak, get_usage

@pytest.fixture
def push_usage(test_base_dir):
usage_path = os.path.join(test_base_dir, 'sample_data', 'push_usage_data.json')
usage_path = os.path.join(test_base_dir, "sample_data", "push_usage_data.json")
with open(usage_path) as f:
return json.load(f)


def test_peak(push_usage):
peak = get_peak(push_usage['facets'][0])
assert peak['needInvestigation'] == 149.0
assert peak['time'] == 1584035553
peak = get_peak(push_usage["facets"][0])
assert peak["needInvestigation"] == 149.0
assert peak["time"] == 1584035553


def test_latest(push_usage):
latest = get_latest(push_usage['facets'][0])
assert latest['needInvestigation'] == 30.0
assert latest['time'] == 1584042753
latest = get_latest(push_usage["facets"][0])
assert latest["needInvestigation"] == 30.0
assert latest["time"] == 1584042753


@responses.activate
def test_get_usage(push_usage, test_repository):
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
'try', 'treeherder-prod'
"try", "treeherder-prod"
)
new_relic_url = '{}?nrql={}'.format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)

responses.add(
responses.GET,
new_relic_url,
body=json.dumps(push_usage),
status=200,
content_type='application/json',
content_type="application/json",
)

# create the Pushes that match the usage response
for rev in [
'4c45a777949168d16c03a4cba167678b7ab65f76',
'1cd5f1062ce081636af8083eb5b87e45d0f03d01',
'c73645027199ac3e092002452b436dde461bbe28',
'b6e5cd6373370c40d315b0e266c6c3e9aa48ae12',
"4c45a777949168d16c03a4cba167678b7ab65f76",
"1cd5f1062ce081636af8083eb5b87e45d0f03d01",
"c73645027199ac3e092002452b436dde461bbe28",
"b6e5cd6373370c40d315b0e266c6c3e9aa48ae12",
]:
Push.objects.create(
revision=rev,
repository=test_repository,
author='phydeaux@dog.org',
author="phydeaux@dog.org",
time=datetime.datetime.now(),
)

@ -62,6 +62,6 @@ def test_get_usage(push_usage, test_repository):
facet = usage[0]

assert len(usage) == 4
assert facet['push']['revision'] == '4c45a777949168d16c03a4cba167678b7ab65f76'
assert facet['peak']['needInvestigation'] == 149.0
assert facet['latest']['needInvestigation'] == 30.0
assert facet["push"]["revision"] == "4c45a777949168d16c03a4cba167678b7ab65f76"
assert facet["peak"]["needInvestigation"] == 149.0
assert facet["latest"]["needInvestigation"] == 30.0

@ -9,49 +9,49 @@ from treeherder.push_health.utils import (


@pytest.mark.parametrize(
('action', 'test', 'signature', 'message', 'expected'),
("action", "test", "signature", "message", "expected"),
[
('test_result', 'dis/dat/da/odder/ting', 'sig', 'mess', 'dis/dat/da/odder/ting'),
('crash', None, 'sig', 'mess', 'sig'),
('log', None, None, 'mess', 'mess'),
('meh', None, None, None, 'Non-Test Error'),
('test_result', 'pid:dis/dat/da/odder/ting', 'sig', 'mess', None),
("test_result", "dis/dat/da/odder/ting", "sig", "mess", "dis/dat/da/odder/ting"),
("crash", None, "sig", "mess", "sig"),
("log", None, None, "mess", "mess"),
("meh", None, None, None, "Non-Test Error"),
("test_result", "pid:dis/dat/da/odder/ting", "sig", "mess", None),
(
'test_result',
'tests/layout/this == tests/layout/that',
'sig',
'mess',
'layout/this == layout/that',
"test_result",
"tests/layout/this == tests/layout/that",
"sig",
"mess",
"layout/this == layout/that",
),
(
'test_result',
'tests/layout/this != tests/layout/that',
'sig',
'mess',
'layout/this != layout/that',
"test_result",
"tests/layout/this != tests/layout/that",
"sig",
"mess",
"layout/this != layout/that",
),
(
'test_result',
'build/tests/reftest/tests/this != build/tests/reftest/tests/that',
'sig',
'mess',
'this != that',
"test_result",
"build/tests/reftest/tests/this != build/tests/reftest/tests/that",
"sig",
"mess",
"this != that",
),
(
'test_result',
'http://10.0.5.5/tests/this != http://10.0.5.5/tests/that',
'sig',
'mess',
'this != that',
"test_result",
"http://10.0.5.5/tests/this != http://10.0.5.5/tests/that",
"sig",
"mess",
"this != that",
),
('test_result', 'build/tests/reftest/tests/this', 'sig', 'mess', 'this'),
('test_result', 'test=jsreftest.html', 'sig', 'mess', 'jsreftest.html'),
('test_result', 'http://10.0.5.5/tests/this/thing', 'sig', 'mess', 'this/thing'),
('test_result', 'http://localhost:5000/tests/this/thing', 'sig', 'mess', 'thing'),
('test_result', 'thing is done (finished)', 'sig', 'mess', 'thing is done'),
('test_result', 'Last test finished', 'sig', 'mess', None),
('test_result', '(SimpleTest/TestRunner.js)', 'sig', 'mess', None),
('test_result', '/this\\thing\\there', 'sig', 'mess', 'this/thing/there'),
("test_result", "build/tests/reftest/tests/this", "sig", "mess", "this"),
("test_result", "test=jsreftest.html", "sig", "mess", "jsreftest.html"),
("test_result", "http://10.0.5.5/tests/this/thing", "sig", "mess", "this/thing"),
("test_result", "http://localhost:5000/tests/this/thing", "sig", "mess", "thing"),
("test_result", "thing is done (finished)", "sig", "mess", "thing is done"),
("test_result", "Last test finished", "sig", "mess", None),
("test_result", "(SimpleTest/TestRunner.js)", "sig", "mess", None),
("test_result", "/this\\thing\\there", "sig", "mess", "this/thing/there"),
],
)
def test_clean_test(action, test, signature, message, expected):

@ -59,13 +59,13 @@ def test_clean_test(action, test, signature, message, expected):


@pytest.mark.parametrize(
('config', 'expected'),
("config", "expected"),
[
('opt', 'opt'),
('debug', 'debug'),
('asan', 'asan'),
('pgo', 'opt'),
('shippable', 'opt'),
("opt", "opt"),
("debug", "debug"),
("asan", "asan"),
("pgo", "opt"),
("shippable", "opt"),
],
)
def test_clean_config(config, expected):

@ -73,11 +73,11 @@ def test_clean_config(config, expected):


@pytest.mark.parametrize(
('platform', 'expected'),
("platform", "expected"),
[
('macosx64 opt and such', 'osx-10-10 opt and such'),
('linux doohickey', 'linux doohickey'),
('windows gizmo', 'windows gizmo'),
("macosx64 opt and such", "osx-10-10 opt and such"),
("linux doohickey", "linux doohickey"),
("windows gizmo", "windows gizmo"),
],
)
def test_clean_platform(platform, expected):

@ -85,14 +85,14 @@ def test_clean_platform(platform, expected):


@pytest.mark.parametrize(
('line', 'expected'),
("line", "expected"),
[
('Return code:', False),
('unexpected status', False),
('unexpected crashes', False),
('exit status', False),
('Finished in', False),
('expect magic', True),
("Return code:", False),
("unexpected status", False),
("unexpected crashes", False),
("exit status", False),
("Finished in", False),
("expect magic", True),
],
)
def test_is_valid_failure_line(line, expected):

@ -10,22 +10,22 @@ def job_data(**kwargs):
jobs_obj = {
"revision": kwargs.get("revision", "24fd64b8251fac5cf60b54a915bffa7e51f636b5"),
"job": {
u"build_platform": build_platform(**kwargs.pop("build_platform", {})),
u"submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()),
u"start_timestamp": kwargs.pop("start_timestamp", start_timestamp()),
u"name": kwargs.pop("name", u"mochitest-5"),
u"option_collection": option_collection(**kwargs.pop("option_collection", {})),
u"log_references": log_references(kwargs.pop("log_references", [])),
u"who": kwargs.pop("who", u"sendchange-unittest"),
u"reason": kwargs.pop("reason", u"scheduler"),
u"artifact": kwargs.pop("artifact", {}),
u"machine_platform": machine_platform(**kwargs.pop("machine_platform", {})),
u"machine": kwargs.pop("machine", u"talos-r3-xp-088"),
u"state": kwargs.pop("state", u"completed"),
u"result": kwargs.pop("result", 0),
u"job_guid": kwargs.pop(u"job_guid", u"f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),
u"product_name": kwargs.pop("product_name", u"firefox"),
u"end_timestamp": kwargs.pop("end_timestamp", end_timestamp()),
"build_platform": build_platform(**kwargs.pop("build_platform", {})),
"submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()),
"start_timestamp": kwargs.pop("start_timestamp", start_timestamp()),
"name": kwargs.pop("name", "mochitest-5"),
"option_collection": option_collection(**kwargs.pop("option_collection", {})),
"log_references": log_references(kwargs.pop("log_references", [])),
"who": kwargs.pop("who", "sendchange-unittest"),
"reason": kwargs.pop("reason", "scheduler"),
"artifact": kwargs.pop("artifact", {}),
"machine_platform": machine_platform(**kwargs.pop("machine_platform", {})),
"machine": kwargs.pop("machine", "talos-r3-xp-088"),
"state": kwargs.pop("state", "completed"),
"result": kwargs.pop("result", 0),
"job_guid": kwargs.pop("job_guid", "f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),
"product_name": kwargs.pop("product_name", "firefox"),
"end_timestamp": kwargs.pop("end_timestamp", end_timestamp()),
},
}

@ -63,7 +63,7 @@ def option_collection(**kwargs):
Return a sample data structure, with default values.

"""
defaults = {u"debug": True}
defaults = {"debug": True}

defaults.update(kwargs)

@ -72,7 +72,7 @@ def option_collection(**kwargs):

def log_references(log_refs=None):
if not log_refs:
log_refs = [{u"url": u"http://ftp.mozilla.org/pub/...", u"name": u"unittest"}]
log_refs = [{"url": "http://ftp.mozilla.org/pub/...", "name": "unittest"}]
return log_refs


@ -82,9 +82,9 @@ def build_platform(**kwargs):

"""
defaults = {
u"platform": u"WINNT5.1",
u"os_name": u"win",
u"architecture": u"x86",
"platform": "WINNT5.1",
"os_name": "win",
"architecture": "x86",
}

defaults.update(kwargs)

@ -98,9 +98,9 @@ def machine_platform(**kwargs):

"""
defaults = {
u"platform": u"WINNT5.1",
u"os_name": u"win",
u"architecture": u"x86",
"platform": "WINNT5.1",
"os_name": "win",
"architecture": "x86",
}

defaults.update(kwargs)

@ -58,9 +58,9 @@ def test_JointConsumer_on_message_do_not_call_classification_ingestion(monkeypat
nonlocal mock_called
mock_called = True

monkeypatch.setattr(store_pulse_tasks, 'apply_async', lambda args, queue: None)
monkeypatch.setattr(store_pulse_tasks, "apply_async", lambda args, queue: None)
monkeypatch.setattr(
store_pulse_tasks_classification, 'apply_async', mock_store_pulse_tasks_classification
store_pulse_tasks_classification, "apply_async", mock_store_pulse_tasks_classification
)

consumer = JointConsumer(

@ -76,10 +76,10 @@ def test_JointConsumer_on_message_do_not_call_classification_ingestion(monkeypat
message = MagicMock()
monkeypatch.setattr(
message,
'delivery_info',
"delivery_info",
{
'exchange': 'exchange/taskcluster-queue/v1/task-completed',
'routing_key': 'primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-bugbug.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._',
"exchange": "exchange/taskcluster-queue/v1/task-completed",
"routing_key": "primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-bugbug.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._",
},
)
consumer.on_message(None, message)

@ -94,9 +94,9 @@ def test_JointConsumer_on_message_call_classification_ingestion(monkeypatch):
nonlocal mock_called
mock_called = True

monkeypatch.setattr(store_pulse_tasks, 'apply_async', lambda args, queue: None)
monkeypatch.setattr(store_pulse_tasks, "apply_async", lambda args, queue: None)
monkeypatch.setattr(
store_pulse_tasks_classification, 'apply_async', mock_store_pulse_tasks_classification
store_pulse_tasks_classification, "apply_async", mock_store_pulse_tasks_classification
)

consumer = JointConsumer(

@ -112,10 +112,10 @@ def test_JointConsumer_on_message_call_classification_ingestion(monkeypatch):
message = MagicMock()
monkeypatch.setattr(
message,
'delivery_info',
"delivery_info",
{
'exchange': 'exchange/taskcluster-queue/v1/task-completed',
'routing_key': 'primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-mozci.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._',
"exchange": "exchange/taskcluster-queue/v1/task-completed",
"routing_key": "primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-mozci.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._",
},
)
consumer.on_message(None, message)

@ -10,32 +10,32 @@ from treeherder.services.taskcluster import (
TaskclusterModelNullObject,
)

load_json_fixture = SampleDataJSONLoader('sherlock')
load_json_fixture = SampleDataJSONLoader("sherlock")


@pytest.fixture(scope="module")
def actions_json():
return load_json_fixture('initialActions.json')
return load_json_fixture("initialActions.json")


@pytest.fixture(scope="module")
def expected_actions_json():
return load_json_fixture('reducedActions.json')
return load_json_fixture("reducedActions.json")


@pytest.fixture(scope="module")
def original_task():
return load_json_fixture('originalTask.json')
return load_json_fixture("originalTask.json")


@pytest.fixture(scope="module")
def expected_backfill_task():
return load_json_fixture('backfillTask.json')
return load_json_fixture("backfillTask.json")


class TestTaskclusterModelImpl:
FAKE_ROOT_URL = 'https://fakerooturl.org'
FAKE_OPTIONS = (FAKE_ROOT_URL, 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
FAKE_ROOT_URL = "https://fakerooturl.org"
FAKE_OPTIONS = (FAKE_ROOT_URL, "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")

def test_can_instantiate_without_credentials(self):
try:

@ -1,9 +1,9 @@
from treeherder.config.settings import * # noqa: F403

DATABASES["default"]["TEST"] = {"NAME": "test_treeherder"} # noqa: F405
KEY_PREFIX = 'test'
KEY_PREFIX = "test"

TREEHERDER_TEST_REPOSITORY_NAME = 'mozilla-central'
TREEHERDER_TEST_REPOSITORY_NAME = "mozilla-central"

# this makes celery calls synchronous, useful for unit testing
CELERY_TASK_ALWAYS_EAGER = True

@ -22,7 +22,7 @@ BUGFILER_API_URL = "https://thisisnotbugzilla.org"
# access. But if we use the defaults in config.settings, we also get the
# ``ModelBackend``, which will try to access the DB. This ensures we don't
# do that, since we don't have any tests that use the ``ModelBackend``.
AUTHENTICATION_BACKENDS = ('treeherder.auth.backends.AuthBackend',)
AUTHENTICATION_BACKENDS = ("treeherder.auth.backends.AuthBackend",)

# For Push Health Usage dashboard
NEW_RELIC_INSIGHTS_API_KEY = "123"

@ -31,7 +31,7 @@ NEW_RELIC_INSIGHTS_API_KEY = "123"
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#show-toolbar-callback
# "You can provide your own function callback(request) which returns True or False."
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda request: False,
"SHOW_TOOLBAR_CALLBACK": lambda request: False,
}

INSTALLED_APPS.remove('django.contrib.staticfiles') # noqa: F405
INSTALLED_APPS.remove("django.contrib.staticfiles") # noqa: F405

@ -6,41 +6,41 @@ from treeherder.middleware import CustomWhiteNoise

URLS_IMMUTABLE = [
# Assets generated by Yarn.
'/assets/2.379789df.css',
'/assets/dancing_cat.fa5552a5.gif',
'/assets/fontawesome-webfont.af7ae505.woff2',
'/assets/fontawesome-webfont.fee66e71.woff',
'/assets/index.1d85033a.js',
'/assets/index.1d85033a.js.map',
'/assets/perf.d7fea1e4.css',
'/assets/perf.d7fea1e4.css.map',
'/assets/treeherder-logo.3df97cff.png',
"/assets/2.379789df.css",
"/assets/dancing_cat.fa5552a5.gif",
"/assets/fontawesome-webfont.af7ae505.woff2",
"/assets/fontawesome-webfont.fee66e71.woff",
"/assets/index.1d85033a.js",
"/assets/index.1d85033a.js.map",
"/assets/perf.d7fea1e4.css",
"/assets/perf.d7fea1e4.css.map",
"/assets/treeherder-logo.3df97cff.png",
]

URLS_NOT_IMMUTABLE = [
'/',
'/contribute.json',
'/perf.html',
'/revision.txt',
'/tree_open.png',
'/docs/schema.js',
"/",
"/contribute.json",
"/perf.html",
"/revision.txt",
"/tree_open.png",
"/docs/schema.js",
# The unhashed Yarn/webpack output if using `yarn build --mode development`.
'/assets/runtime.js',
'/assets/vendors~index.js',
"/assets/runtime.js",
"/assets/vendors~index.js",
# The unhashed Django static asset originals (used in development).
'/static/debug_toolbar/assets/toolbar.css',
'/static/rest_framework/docs/js/jquery.json-view.min.js',
"/static/debug_toolbar/assets/toolbar.css",
"/static/rest_framework/docs/js/jquery.json-view.min.js",
]


@pytest.mark.parametrize('url', URLS_IMMUTABLE)
@pytest.mark.parametrize("url", URLS_IMMUTABLE)
def test_immutable_file_test_matches(url):
assert CustomWhiteNoise().immutable_file_test('', url)
assert CustomWhiteNoise().immutable_file_test("", url)


@pytest.mark.parametrize('url', URLS_NOT_IMMUTABLE)
@pytest.mark.parametrize("url", URLS_NOT_IMMUTABLE)
def test_immutable_file_test_does_not_match(url):
assert not CustomWhiteNoise().immutable_file_test('', url)
assert not CustomWhiteNoise().immutable_file_test("", url)


def test_content_security_policy_header(client):

@ -48,7 +48,7 @@ def test_content_security_policy_header(client):
# however they won't exist unless `yarn build` has been run first.
# So instead we request an arbitrary static asset from django-rest-framework,
# which will be served with the same headers as our frontend HTML.
response = client.get('/static/rest_framework/css/default.css')
assert response.has_header('Content-Security-Policy')
response = client.get("/static/rest_framework/css/default.css")
assert response.has_header("Content-Security-Policy")
policy_regex = r"default-src 'none'; script-src 'self' 'unsafe-eval' 'report-sample'; .*; report-uri /api/csp-report/"
assert re.match(policy_regex, response['Content-Security-Policy'])
assert re.match(policy_regex, response["Content-Security-Policy"])

@ -9,26 +9,26 @@ from treeherder.utils.http import fetch_text

def test_block_unmocked_requests():
"""Ensure the `block_unmocked_requests` fixture prevents requests from hitting the network."""
url = 'https://example.com'
url = "https://example.com"

with pytest.raises(RuntimeError, match='Tests must mock all HTTP requests!'):
with pytest.raises(RuntimeError, match="Tests must mock all HTTP requests!"):
fetch_text(url)

with responses.RequestsMock() as rsps:
rsps.add(responses.GET, url, body='Mocked requests still work')
rsps.add(responses.GET, url, body="Mocked requests still work")
text = fetch_text(url)
assert text == 'Mocked requests still work'
assert text == "Mocked requests still work"


@pytest.mark.django_db
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`."""
call_command('makemigrations', interactive=False, dry_run=True, check_changes=True)
call_command("makemigrations", interactive=False, dry_run=True, check_changes=True)


def test_django_cache():
"""Test the Django cache backend & associated server are properly set up."""
k, v = 'my_key', 'my_value'
k, v = "my_key", "my_value"
cache.set(k, v, 10)
assert cache.get(k) == v

@ -49,4 +49,4 @@ def test_celery_setup():
def test_load_initial_data():
"Test load_initial_data executes properly"

call_command('load_initial_data')
call_command("load_initial_data")

@ -40,10 +40,10 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
push_index = 0

# Modify job structure to sync with the push sample data
if 'sources' in blob:
del blob['sources']
if "sources" in blob:
del blob["sources"]

blob['revision'] = sample_push[push_index]['revision']
blob["revision"] = sample_push[push_index]["revision"]

blobs.append(blob)

@ -52,14 +52,14 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
# Build data structures to confirm everything is stored
# as expected
if verify_data:
job = blob['job']
job = blob["job"]

build_platforms_ref.add(
"-".join(
[
job.get('build_platform', {}).get('os_name', 'unknown'),
job.get('build_platform', {}).get('platform', 'unknown'),
job.get('build_platform', {}).get('architecture', 'unknown'),
job.get("build_platform", {}).get("os_name", "unknown"),
job.get("build_platform", {}).get("platform", "unknown"),
job.get("build_platform", {}).get("architecture", "unknown"),
]
)
)

@ -67,30 +67,30 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
machine_platforms_ref.add(
"-".join(
[
job.get('machine_platform', {}).get('os_name', 'unknown'),
job.get('machine_platform', {}).get('platform', 'unknown'),
job.get('machine_platform', {}).get('architecture', 'unknown'),
job.get("machine_platform", {}).get("os_name", "unknown"),
job.get("machine_platform", {}).get("platform", "unknown"),
job.get("machine_platform", {}).get("architecture", "unknown"),
]
)
)

machines_ref.add(job.get('machine', 'unknown'))
machines_ref.add(job.get("machine", "unknown"))

options_ref = options_ref.union(job.get('option_collection', []).keys())
options_ref = options_ref.union(job.get("option_collection", []).keys())

job_types_ref.add(job.get('name', 'unknown'))
products_ref.add(job.get('product_name', 'unknown'))
pushes_ref.add(blob['revision'])
job_types_ref.add(job.get("name", "unknown"))
products_ref.add(job.get("product_name", "unknown"))
pushes_ref.add(blob["revision"])

log_url_list = job.get('log_references', [])
log_url_list = job.get("log_references", [])
for log_data in log_url_list:
log_urls_ref.add(log_data['url'])
log_urls_ref.add(log_data["url"])

artifact_name = job.get('artifact', {}).get('name')
artifact_name = job.get("artifact", {}).get("name")
if artifact_name:
artifacts_ref[artifact_name] = job.get('artifact')
artifacts_ref[artifact_name] = job.get("artifact")

superseded = blob.get('superseded', [])
superseded = blob.get("superseded", [])
superseded_job_guids.update(superseded)

# Store the modified json blobs

@ -132,40 +132,40 @@ def verify_machine_platforms(machine_platforms_ref):


def verify_machines(machines_ref):
machines = models.Machine.objects.all().values_list('name', flat=True)
machines = models.Machine.objects.all().values_list("name", flat=True)
assert machines_ref.issubset(machines)


def verify_options(options_ref):
options = models.Option.objects.all().values_list('name', flat=True)
options = models.Option.objects.all().values_list("name", flat=True)

assert options_ref.issubset(options)


def verify_job_types(job_types_ref):
job_types = models.JobType.objects.all().values_list('name', flat=True)
job_types = models.JobType.objects.all().values_list("name", flat=True)
assert job_types_ref.issubset(job_types)


def verify_products(products_ref):
products = models.Product.objects.all().values_list('name', flat=True)
products = models.Product.objects.all().values_list("name", flat=True)

assert products_ref.issubset(products)


def verify_pushes(pushes_ref):
return pushes_ref.issubset(models.Push.objects.values_list('revision', flat=True))
return pushes_ref.issubset(models.Push.objects.values_list("revision", flat=True))


def verify_log_urls(log_urls_ref):
log_urls = set(models.JobLog.objects.values_list('url', flat=True))
log_urls = set(models.JobLog.objects.values_list("url", flat=True))

assert log_urls_ref.issubset(log_urls)


def verify_superseded(expected_superseded_job_guids):
super_seeded_guids = models.Job.objects.filter(result='superseded').values_list(
'guid', flat=True
super_seeded_guids = models.Job.objects.filter(result="superseded").values_list(
"guid", flat=True
)
assert set(super_seeded_guids) == expected_superseded_job_guids

@ -197,10 +197,10 @@ def create_generic_job(guid, repository, push_id, generic_reference_data, tier=N
job_group=generic_reference_data.job_group,
product=generic_reference_data.product,
failure_classification_id=1,
who='testuser@foo.com',
reason='success',
result='finished',
state='completed',
who="testuser@foo.com",
reason="success",
result="finished",
state="completed",
submit_time=job_time,
start_time=job_time,
end_time=job_time,

@ -215,15 +15,15 @@ def add_log_response(filename):
log_path = SampleData().get_log_path(filename)
log_url = "http://my-log.mozilla.org/{}".format(filename)

with open(log_path, 'rb') as log_file:
with open(log_path, "rb") as log_file:
content = log_file.read()
responses.add(
responses.GET,
log_url,
body=content,
adding_headers={
'Content-Encoding': 'gzip',
'Content-Length': str(len(content)),
"Content-Encoding": "gzip",
"Content-Length": str(len(content)),
},
)
return log_url

@ -7,7 +7,7 @@ from datetime import timedelta


@pytest.mark.django_db
@patch('treeherder.workers.stats.get_stats_client')
@patch("treeherder.workers.stats.get_stats_client")
def test_publish_stats_nothing_to_do(get_worker_mock, django_assert_num_queries, caplog):
statsd_client = MagicMock()
get_worker_mock.return_value = statsd_client

@ -16,15 +16,15 @@ def test_publish_stats_nothing_to_do(get_worker_mock, django_assert_num_queries,
with django_assert_num_queries(2):
publish_stats()
assert [(level, message) for _, level, message in caplog.record_tuples] == [
(20, 'Publishing runtime statistics to statsd'),
(20, 'Ingested 0 pushes'),
(20, 'Ingested 0 jobs in total'),
(20, "Publishing runtime statistics to statsd"),
(20, "Ingested 0 pushes"),
(20, "Ingested 0 jobs in total"),
]
assert statsd_client.call_args_list == []


@pytest.mark.django_db
@patch('treeherder.workers.stats.get_stats_client')
@patch("treeherder.workers.stats.get_stats_client")
def test_publish_stats(
get_worker_mock, eleven_jobs_stored_new_date, django_assert_num_queries, caplog, settings
):

@ -40,13 +40,13 @@ def test_publish_stats(
with django_assert_num_queries(2):
publish_stats()
assert [(level, message) for _, level, message in caplog.record_tuples] == [
(20, 'Publishing runtime statistics to statsd'),
(20, 'Ingested 10 pushes'),
(20, 'Ingested 11 jobs in total'),
(20, "Publishing runtime statistics to statsd"),
(20, "Ingested 10 pushes"),
(20, "Ingested 11 jobs in total"),
]
assert statsd_client.incr.call_args_list == [
call('push', 10),
call('jobs', 11),
call('jobs_repo.mozilla-central', 11),
call('jobs_state.completed', 11),
call("push", 10),
call("jobs", 11),
call("jobs_repo.mozilla-central", 11),
call("jobs_state.completed", 11),
]


@ -58,7 +58,7 @@ def test_retryable_task_throws_retry():

with pytest.raises(Retry) as e:
throwing_task_should_retry.delay()
assert str(e.value) == 'Retry in 10s: OperationalError()'
assert str(e.value) == "Retry in 10s: OperationalError()"

# The task is only called once, the Retry() exception
# will signal to the worker that the task needs to be tried again later

@ -7,32 +7,32 @@ from treeherder.utils.taskcluster import download_artifact

@responses.activate
@pytest.mark.parametrize(
'path, response_config, expected_result',
"path, response_config, expected_result",
[
[
'my_file.json',
{'json': {'key': 'value'}, 'content_type': 'application/json'},
{'key': 'value'},
"my_file.json",
{"json": {"key": "value"}, "content_type": "application/json"},
{"key": "value"},
],
[
'my_file.yml',
{'body': 'key:\n - value1\n - value2', 'content_type': 'text/plain'},
{'key': ['value1', 'value2']},
"my_file.yml",
{"body": "key:\n - value1\n - value2", "content_type": "text/plain"},
{"key": ["value1", "value2"]},
],
[
'my_file.txt',
{'body': 'some text from a file', 'content_type': 'text/plain'},
'some text from a file',
"my_file.txt",
{"body": "some text from a file", "content_type": "text/plain"},
"some text from a file",
],
],
)
def test_download_artifact(path, response_config, expected_result):
root_url = 'https://taskcluster.net'
task_id = 'A35mWTRuQmyj88yMnIF0fA'
root_url = "https://taskcluster.net"
task_id = "A35mWTRuQmyj88yMnIF0fA"

responses.add(
responses.GET,
f'{root_url}/api/queue/v1/task/{task_id}/artifacts/{path}',
f"{root_url}/api/queue/v1/task/{task_id}/artifacts/{path}",
**response_config,
status=200,
)

@ -5,31 +5,31 @@ from treeherder.utils.taskcluster_lib_scopes import patternMatch, satisfiesExpre

# satisfiesExpression()
@pytest.mark.parametrize(
'scopeset, expression',
"scopeset, expression",
[
[[], {'AllOf': []}],
[['A'], {'AllOf': ['A']}],
[['A', 'B'], 'A'],
[['a*', 'b*', 'c*'], 'abc'],
[['abc'], {'AnyOf': ['abc', 'def']}],
[['def'], {'AnyOf': ['abc', 'def']}],
[['abc', 'def'], {'AnyOf': ['abc', 'def']}],
[['abc*'], {'AnyOf': ['abc', 'def']}],
[['abc*'], {'AnyOf': ['abc']}],
[['abc*', 'def*'], {'AnyOf': ['abc', 'def']}],
[['foo'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
[['a*', 'b*', 'c*'], {'AnyOf': ['cfoo', 'dfoo']}],
[['a*', 'b*', 'c*'], {'AnyOf': ['bx', 'by']}],
[['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx']}],
[[], {"AllOf": []}],
[["A"], {"AllOf": ["A"]}],
[["A", "B"], "A"],
[["a*", "b*", "c*"], "abc"],
[["abc"], {"AnyOf": ["abc", "def"]}],
[["def"], {"AnyOf": ["abc", "def"]}],
[["abc", "def"], {"AnyOf": ["abc", "def"]}],
[["abc*"], {"AnyOf": ["abc", "def"]}],
[["abc*"], {"AnyOf": ["abc"]}],
[["abc*", "def*"], {"AnyOf": ["abc", "def"]}],
[["foo"], {"AllOf": [{"AnyOf": [{"AllOf": ["foo"]}, {"AllOf": ["bar"]}]}]}],
[["a*", "b*", "c*"], {"AnyOf": ["cfoo", "dfoo"]}],
[["a*", "b*", "c*"], {"AnyOf": ["bx", "by"]}],
[["a*", "b*", "c*"], {"AllOf": ["bx", "cx"]}],
# complex expression with only
# some AnyOf branches matching
[
['a*', 'b*', 'c*'],
["a*", "b*", "c*"],
{
'AnyOf': [
{'AllOf': ['ax', 'jx']}, # doesn't match
{'AllOf': ['bx', 'cx']}, # does match
'bbb',
"AnyOf": [
{"AllOf": ["ax", "jx"]}, # doesn't match
{"AllOf": ["bx", "cx"]}, # does match
"bbb",
]
},
],

@ -40,21 +40,21 @@ def test_expression_is_satisfied(scopeset, expression):


@pytest.mark.parametrize(
'scopeset, expression',
"scopeset, expression",
[
[[], {'AnyOf': []}],
[[], 'missing-scope'],
[['wrong-scope'], 'missing-scope'],
[['ghi'], {'AnyOf': ['abc', 'def']}],
[['ghi*'], {'AnyOf': ['abc', 'def']}],
[['ghi', 'fff'], {'AnyOf': ['abc', 'def']}],
[['ghi*', 'fff*'], {'AnyOf': ['abc', 'def']}],
[['abc'], {'AnyOf': ['ghi']}],
[['abc*'], {'AllOf': ['abc', 'ghi']}],
[[''], {'AnyOf': ['abc', 'def']}],
[['abc:def'], {'AnyOf': ['abc', 'def']}],
[['xyz', 'abc'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
[['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx', {'AnyOf': ['xxx', 'yyyy']}]}],
[[], {"AnyOf": []}],
[[], "missing-scope"],
[["wrong-scope"], "missing-scope"],
[["ghi"], {"AnyOf": ["abc", "def"]}],
[["ghi*"], {"AnyOf": ["abc", "def"]}],
[["ghi", "fff"], {"AnyOf": ["abc", "def"]}],
[["ghi*", "fff*"], {"AnyOf": ["abc", "def"]}],
[["abc"], {"AnyOf": ["ghi"]}],
[["abc*"], {"AllOf": ["abc", "ghi"]}],
[[""], {"AnyOf": ["abc", "def"]}],
[["abc:def"], {"AnyOf": ["abc", "def"]}],
[["xyz", "abc"], {"AllOf": [{"AnyOf": [{"AllOf": ["foo"]}, {"AllOf": ["bar"]}]}]}],
[["a*", "b*", "c*"], {"AllOf": ["bx", "cx", {"AnyOf": ["xxx", "yyyy"]}]}],
],
)
def test_expression_is_not_satisfied(scopeset, expression):

@ -62,34 +62,34 @@ def test_expression_is_not_satisfied(scopeset, expression):


@pytest.mark.parametrize(
'scopeset',
"scopeset",
[
None,
'scopeset_argument',
('scopeset', 'argument'),
{'scopeset', 'argument'},
"scopeset_argument",
("scopeset", "argument"),
{"scopeset", "argument"},
],
)
def test_wrong_scopeset_type_raises_exception(scopeset):
with pytest.raises(TypeError):
satisfiesExpression(scopeset, 'in-tree:hook-action:{hook_group_id}/{hook_id}')
satisfiesExpression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")


# patternMatch()
def test_identical_scope_and_pattern_are_matching():
assert patternMatch('mock:scope', 'mock:scope') is True
assert patternMatch("mock:scope", "mock:scope") is True


@pytest.mark.parametrize(
'pattern, scope', [('matching*', 'matching'), ('matching*', 'matching/scope')]
"pattern, scope", [("matching*", "matching"), ("matching*", "matching/scope")]
)
def test_starred_patterns_are_matching(pattern, scope):
assert patternMatch(pattern, scope) is True


@pytest.mark.parametrize(
'pattern, scope',
[('matching*', 'mismatching'), ('match*ing', 'matching'), ('*matching', 'matching')],
"pattern, scope",
[("matching*", "mismatching"), ("match*ing", "matching"), ("*matching", "matching")],
)
def test_starred_patterns_dont_matching(pattern, scope):
assert not patternMatch(pattern, scope)

@ -19,14 +19,14 @@ class AuthenticatedView(APIView):
"""This inherits `IsAuthenticatedOrReadOnly` due to `DEFAULT_PERMISSION_CLASSES`."""

def get(self, request, *args, **kwargs):
return Response({'foo': 'bar'})
return Response({"foo": "bar"})

def post(self, request, *args, **kwargs):
return Response({'foo': 'bar'})
return Response({"foo": "bar"})


factory = APIRequestFactory()
url = 'http://testserver/'
url = "http://testserver/"


def test_get_no_auth():

@ -34,7 +34,7 @@ def test_get_no_auth():
view = AuthenticatedView.as_view()
response = view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {'foo': 'bar'}
assert response.data == {"foo": "bar"}


def test_post_no_auth():

@ -42,7 +42,7 @@ def test_post_no_auth():
view = AuthenticatedView.as_view()
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data == {'detail': 'Authentication credentials were not provided.'}
assert response.data == {"detail": "Authentication credentials were not provided."}


# Auth Login and Logout Tests

@ -50,13 +50,13 @@ def test_post_no_auth():

@pytest.mark.django_db
@pytest.mark.parametrize(
('id_token_sub', 'id_token_email', 'expected_username'),
("id_token_sub", "id_token_email", "expected_username"),
[
('ad|Mozilla-LDAP|biped', 'biped@mozilla.com', 'mozilla-ldap/biped@mozilla.com'),
('email', 'biped@mozilla.com', 'email/biped@mozilla.com'),
('oauth2|biped', 'biped@mozilla.com', 'oauth2/biped@mozilla.com'),
('github|0000', 'biped@gmail.com', 'github/biped@gmail.com'),
('google-oauth2|0000', 'biped@mozilla.com', 'google/biped@mozilla.com'),
("ad|Mozilla-LDAP|biped", "biped@mozilla.com", "mozilla-ldap/biped@mozilla.com"),
("email", "biped@mozilla.com", "email/biped@mozilla.com"),
("oauth2|biped", "biped@mozilla.com", "oauth2/biped@mozilla.com"),
("github|0000", "biped@gmail.com", "github/biped@gmail.com"),
("google-oauth2|0000", "biped@mozilla.com", "google/biped@mozilla.com"),
],
)
def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email, expected_username):

@ -69,9 +69,9 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': id_token_sub, 'email': id_token_email, 'exp': id_token_expiration_timestamp}
return {"sub": id_token_sub, "email": id_token_email, "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

assert auth_session_key not in client.session
assert User.objects.count() == 0

@ -80,17 +80,17 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,
# which is then associated with their Django session.

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer meh',
HTTP_ID_TOKEN='meh',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer meh",
HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
assert resp.json() == {
'username': expected_username,
'email': id_token_email,
'is_staff': False,
'is_superuser': False,
"username": expected_username,
"email": id_token_email,
"is_staff": False,
"is_superuser": False,
}
assert auth_session_key in client.session
# Uses a tolerance of up to 5 seconds to account for rounding/the time the test takes to run.

@ -104,20 +104,20 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,

# Logging out should disassociate the user from the Django session.

resp = client.get(reverse('auth-logout'))
resp = client.get(reverse("auth-logout"))
assert resp.status_code == 200
assert auth_session_key not in client.session

# Logging in again should associate the existing user with the Django session.

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer meh',
HTTP_ID_TOKEN='meh',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer meh",
HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
assert resp.json()['username'] == expected_username
assert resp.json()["username"] == expected_username
assert auth_session_key in client.session
assert client.session.get_expiry_age() == pytest.approx(one_hour_in_seconds, abs=5)
assert User.objects.count() == 1

@ -134,19 +134,19 @@ def test_login_same_email_different_provider(test_ldap_user, client, monkeypatch
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': 'email', 'email': test_ldap_user.email, 'exp': id_token_expiration_timestamp}
return {"sub": "email", "email": test_ldap_user.email, "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer meh',
HTTP_ID_TOKEN='meh',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer meh",
HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
assert resp.json()['username'] == 'email/user@foo.com'
assert resp.json()['email'] == test_ldap_user.email
assert resp.json()["username"] == "email/user@foo.com"
assert resp.json()["email"] == test_ldap_user.email


def test_login_unknown_identity_provider(client, monkeypatch):

@ -156,9 +156,9 @@ def test_login_unknown_identity_provider(client, monkeypatch):
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': 'bad', 'email': 'foo@bar.com', 'exp': id_token_expiration_timestamp}
return {"sub": "bad", "email": "foo@bar.com", "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse("auth-login"),

@ -179,12 +179,12 @@ def test_login_not_active(test_ldap_user, client, monkeypatch):

def userinfo_mock(*args, **kwargs):
return {
'sub': 'Mozilla-LDAP',
'email': test_ldap_user.email,
'exp': id_token_expiration_timestamp,
"sub": "Mozilla-LDAP",
"email": test_ldap_user.email,
"exp": id_token_expiration_timestamp,
}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

test_ldap_user.is_active = False
test_ldap_user.save()

@ -206,45 +206,45 @@ def test_login_authorization_header_missing(client):


@pytest.mark.parametrize(
'auth_header_value',
"auth_header_value",
[
'foo',
'Bearer ',
'Bearer foo bar',
"foo",
"Bearer ",
"Bearer foo bar",
],
)
def test_login_authorization_header_malformed(client, auth_header_value):
resp = client.get(
reverse('auth-login'),
reverse("auth-login"),
HTTP_AUTHORIZATION=auth_header_value,
)
assert resp.status_code == 403
assert resp.json()['detail'] == "Authorization header must be of form 'Bearer {token}'"
assert resp.json()["detail"] == "Authorization header must be of form 'Bearer {token}'"


def test_login_id_token_header_missing(client):
resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer abc',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer abc",
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Id-Token header is expected'
assert resp.json()["detail"] == "Id-Token header is expected"


def test_login_id_token_malformed(client):
resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer abc',
HTTP_ID_TOKEN='aaa',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer abc",
HTTP_ID_TOKEN="aaa",
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Unable to decode the Id token header'
assert resp.json()["detail"] == "Unable to decode the Id token header"


def test_login_id_token_missing_rsa_key_id(client):
resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer abc',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer abc",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io

@ -254,19 +254,19 @@ def test_login_id_token_missing_rsa_key_id(client):
# "typ": "JWT"
# }
# (and default payload)
'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.'
+ 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
+ 'SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9."
+ "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ "SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c"
),
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Id token header missing RSA key ID'
assert resp.json()["detail"] == "Id token header missing RSA key ID"


def test_login_id_token_unknown_rsa_key_id(client):
resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer abc',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer abc",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io

@ -277,19 +277,19 @@ def test_login_id_token_unknown_rsa_key_id(client):
# "kid": "1234"
# }
# (and default payload)
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ.'
+ 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
+ 'Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s'
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ."
+ "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ "Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s"
),
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Id token using unrecognised RSA key ID'
assert resp.json()["detail"] == "Id token using unrecognised RSA key ID"


def test_login_id_token_invalid_signature(client):
resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer foo',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer foo",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io

@ -300,14 +300,14 @@ def test_login_id_token_invalid_signature(client):
# "kid": "MkZDNDcyRkNGRTFDNjlBNjZFOEJBN0ZBNzJBQTNEMDhCMEEwNkFGOA"
# }
# (and default payload)
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp'
+ 'aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9.'
+ 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
+ 'this_signature_is_not_valid'
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp"
+ "aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9."
+ "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ "this_signature_is_not_valid"
),
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Invalid header: Unable to parse authentication'
assert resp.json()["detail"] == "Invalid header: Unable to parse authentication"


def test_login_access_token_expiry_header_missing(client, monkeypatch):

@ -315,17 +315,17 @@ def test_login_access_token_expiry_header_missing(client, monkeypatch):
id_token_expiration_timestamp = now_in_seconds + one_day_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer foo',
HTTP_ID_TOKEN='bar',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer foo",
HTTP_ID_TOKEN="bar",
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Access-Token-Expires-At header is expected'
assert resp.json()["detail"] == "Access-Token-Expires-At header is expected"


def test_login_access_token_expiry_header_malformed(client, monkeypatch):

@ -333,18 +333,18 @@ def test_login_access_token_expiry_header_malformed(client, monkeypatch):
id_token_expiration_timestamp = now_in_seconds + one_day_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer foo',
HTTP_ID_TOKEN='bar',
HTTP_ACCESS_TOKEN_EXPIRES_AT='aaa',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer foo",
HTTP_ID_TOKEN="bar",
HTTP_ACCESS_TOKEN_EXPIRES_AT="aaa",
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Access-Token-Expires-At header value is invalid'
assert resp.json()["detail"] == "Access-Token-Expires-At header value is invalid"


def test_login_access_token_expired(client, monkeypatch):

@ -353,18 +353,18 @@ def test_login_access_token_expired(client, monkeypatch):
access_token_expiration_timestamp = now_in_seconds - 30

def userinfo_mock(*args, **kwargs):
return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer foo',
HTTP_ID_TOKEN='bar',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer foo",
HTTP_ID_TOKEN="bar",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 403
assert resp.json()['detail'] == 'Session expiry time has already passed!'
assert resp.json()["detail"] == "Session expiry time has already passed!"


def test_login_id_token_expires_before_access_token(test_ldap_user, client, monkeypatch):

@ -377,14 +377,14 @@ def test_login_id_token_expires_before_access_token(test_ldap_user, client, monk
access_token_expiration_timestamp = now_in_seconds + one_day_in_seconds

def userinfo_mock(*args, **kwargs):
return {'sub': 'email', 'email': test_ldap_user.email, 'exp': id_token_expiration_timestamp}
return {"sub": "email", "email": test_ldap_user.email, "exp": id_token_expiration_timestamp}

monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)

resp = client.get(
reverse('auth-login'),
HTTP_AUTHORIZATION='Bearer meh',
HTTP_ID_TOKEN='meh',
reverse("auth-login"),
HTTP_AUTHORIZATION="Bearer meh",
HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200

@@ -10,149 +10,149 @@ from treeherder.model.models import BugzillaComponent, FilesBugzillaMap


 def test_bugzilla_components_for_path(client, test_job):
-    BugzillaComponent.objects.create(product='Mock Product 1', component='Mock Component 1')
+    BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 1")

     FilesBugzillaMap.objects.create(
-        path='mock/folder/file_1.extension',
-        file_name='file_1.extension',
+        path="mock/folder/file_1.extension",
+        file_name="file_1.extension",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    URL_BASE = reverse('bugzilla-component-list')
+    URL_BASE = reverse("bugzilla-component-list")

-    EXPECTED_MOCK1 = [{'product': 'Mock Product 1', 'component': 'Mock Component 1'}]
+    EXPECTED_MOCK1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]

-    resp = client.get(URL_BASE + '?path=file_1.extension')
+    resp = client.get(URL_BASE + "?path=file_1.extension")
     assert resp.status_code == 200
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=file_2.extension')
+    resp = client.get(URL_BASE + "?path=file_2.extension")
     assert resp.json() == []

-    resp = client.get(URL_BASE + '?path=ile_2.extension')
+    resp = client.get(URL_BASE + "?path=ile_2.extension")
     assert resp.json() == []

-    resp = client.get(URL_BASE + '?path=file_1')
+    resp = client.get(URL_BASE + "?path=file_1")
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=mock/folder/file_1.extension')
+    resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=other_mock/other_folder/file_1.extension')
+    resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
     # Should also pass because search falls back to file name if no match for path.
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=folder/file_1.extension')
+    resp = client.get(URL_BASE + "?path=folder/file_1.extension")
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=folder/file_1.other_extension')
+    resp = client.get(URL_BASE + "?path=folder/file_1.other_extension")
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=completely.unrelated')
+    resp = client.get(URL_BASE + "?path=completely.unrelated")
     assert resp.json() == []

-    BugzillaComponent.objects.create(product='Mock Product 1', component='Mock Component 2')
+    BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 2")

     FilesBugzillaMap.objects.create(
-        path='mock/folder_2/file_1.extension',
-        file_name='file_1.extension',
+        path="mock/folder_2/file_1.extension",
+        file_name="file_1.extension",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK2 = [{'product': 'Mock Product 1', 'component': 'Mock Component 2'}]
+    EXPECTED_MOCK2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]

     EXPECTED_MOCK1_MOCK2 = [
-        {'product': 'Mock Product 1', 'component': 'Mock Component 1'},
-        {'product': 'Mock Product 1', 'component': 'Mock Component 2'},
+        {"product": "Mock Product 1", "component": "Mock Component 1"},
+        {"product": "Mock Product 1", "component": "Mock Component 2"},
     ]

-    resp = client.get(URL_BASE + '?path=file_1.extension')
+    resp = client.get(URL_BASE + "?path=file_1.extension")
     assert resp.json() == EXPECTED_MOCK1_MOCK2

-    resp = client.get(URL_BASE + '?path=mock/folder/file_1.extension')
+    resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
     assert resp.json() == EXPECTED_MOCK1

-    resp = client.get(URL_BASE + '?path=mock/folder_2/file_1.extension')
+    resp = client.get(URL_BASE + "?path=mock/folder_2/file_1.extension")
     assert resp.json() == EXPECTED_MOCK2

-    resp = client.get(URL_BASE + '?path=other_mock/other_folder/file_1.extension')
+    resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
     # Should also pass because search falls back to file name if no match for path.
     assert resp.json() == EXPECTED_MOCK1_MOCK2

-    BugzillaComponent.objects.create(product='Mock Product 3', component='Mock Component 3')
+    BugzillaComponent.objects.create(product="Mock Product 3", component="Mock Component 3")

     FilesBugzillaMap.objects.create(
-        path='mock_3/folder_3/other.file.js',
-        file_name='other.file.js',
+        path="mock_3/folder_3/other.file.js",
+        file_name="other.file.js",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK3 = [{'product': 'Mock Product 3', 'component': 'Mock Component 3'}]
+    EXPECTED_MOCK3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]

-    resp = client.get(URL_BASE + '?path=other.file.js')
+    resp = client.get(URL_BASE + "?path=other.file.js")
     assert resp.json() == EXPECTED_MOCK3

-    resp = client.get(URL_BASE + '?path=other.file')
+    resp = client.get(URL_BASE + "?path=other.file")
     assert resp.json() == EXPECTED_MOCK3

-    resp = client.get(URL_BASE + '?path=other')
+    resp = client.get(URL_BASE + "?path=other")
     assert resp.json() == EXPECTED_MOCK3

-    BugzillaComponent.objects.create(product='Mock Product 4', component='Mock Component 4')
+    BugzillaComponent.objects.create(product="Mock Product 4", component="Mock Component 4")

     FilesBugzillaMap.objects.create(
-        path='mock_3/folder_3/other.extension',
-        file_name='other.extension',
+        path="mock_3/folder_3/other.extension",
+        file_name="other.extension",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK4 = [{'product': 'Mock Product 4', 'component': 'Mock Component 4'}]
+    EXPECTED_MOCK4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]

     EXPECTED_MOCK3_MOCK4 = [
-        {'product': 'Mock Product 3', 'component': 'Mock Component 3'},
-        {'product': 'Mock Product 4', 'component': 'Mock Component 4'},
+        {"product": "Mock Product 3", "component": "Mock Component 3"},
+        {"product": "Mock Product 4", "component": "Mock Component 4"},
     ]

-    resp = client.get(URL_BASE + '?path=other.file.js')
+    resp = client.get(URL_BASE + "?path=other.file.js")
     assert resp.json() == EXPECTED_MOCK3

-    resp = client.get(URL_BASE + '?path=other.extension')
+    resp = client.get(URL_BASE + "?path=other.extension")
     assert resp.json() == EXPECTED_MOCK4

-    resp = client.get(URL_BASE + '?path=other')
+    resp = client.get(URL_BASE + "?path=other")
     assert resp.json() == EXPECTED_MOCK3_MOCK4

-    resp = client.get(URL_BASE + '?path=another')
+    resp = client.get(URL_BASE + "?path=another")
     assert resp.json() == []

     BugzillaComponent.objects.create(
-        product='Mock Product org.mozilla.*.<TestName>', component='Mock Component File Match'
+        product="Mock Product org.mozilla.*.<TestName>", component="Mock Component File Match"
     )

     FilesBugzillaMap.objects.create(
-        path='parent/folder/org/mozilla/geckoview/test/MockTestName.kt',
-        file_name='MockTestName.kt',
+        path="parent/folder/org/mozilla/geckoview/test/MockTestName.kt",
+        file_name="MockTestName.kt",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

     BugzillaComponent.objects.create(
-        product='Mock Product org.mozilla.*.<TestName>', component='Mock Component No File Match'
+        product="Mock Product org.mozilla.*.<TestName>", component="Mock Component No File Match"
     )

     FilesBugzillaMap.objects.create(
-        path='parent/folder/org/mozilla/geckoview/test/OtherName.kt',
-        file_name='OtherName.kt',
+        path="parent/folder/org/mozilla/geckoview/test/OtherName.kt",
+        file_name="OtherName.kt",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

     BugzillaComponent.objects.create(
-        product='Mock Product org.mozilla.*.<TestName>',
-        component='Mock Component No File Match For Subtest',
+        product="Mock Product org.mozilla.*.<TestName>",
+        component="Mock Component No File Match For Subtest",
     )

     FilesBugzillaMap.objects.create(
-        path='parent/folder/org/mozilla/geckoview/test/Subtest.kt',
-        file_name='Subtest.kt',
+        path="parent/folder/org/mozilla/geckoview/test/Subtest.kt",
+        file_name="Subtest.kt",
         bugzilla_component=BugzillaComponent.objects.last(),
     )
@@ -161,33 +161,33 @@ def test_bugzilla_components_for_path(client, test_job):
     )

     FilesBugzillaMap.objects.create(
-        path='other/folder/org.html',
-        file_name='org.html',
+        path="other/folder/org.html",
+        file_name="org.html",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

     EXPECTED_MOCK_ORG_MOZILLA = [
         {
-            'product': 'Mock Product org.mozilla.*.<TestName>',
-            'component': 'Mock Component File Match',
+            "product": "Mock Product org.mozilla.*.<TestName>",
+            "component": "Mock Component File Match",
         }
     ]

-    resp = client.get(URL_BASE + '?path=org.mozilla.geckoview.test.MockTestName#Subtest')
+    resp = client.get(URL_BASE + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
     assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA

     # Only take test name into account.
-    resp = client.get(URL_BASE + '?path=org.mozilla.otherproduct.otherfolder.MockTestName')
+    resp = client.get(URL_BASE + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
     assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA

-    BugzillaComponent.objects.create(product='Testing', component='Mochitest')
+    BugzillaComponent.objects.create(product="Testing", component="Mochitest")

     FilesBugzillaMap.objects.create(
-        path='mock/mochitest/mochitest.test',
-        file_name='mochitest.test',
+        path="mock/mochitest/mochitest.test",
+        file_name="mochitest.test",
         bugzilla_component=BugzillaComponent.objects.last(),
     )

     # Respect the ignore list of product and component combinations.
-    resp = client.get(URL_BASE + '?path=mock/mochitest/mochitest.test')
+    resp = client.get(URL_BASE + "?path=mock/mochitest/mochitest.test")
     assert resp.json() == []

@@ -7,7 +7,7 @@ from treeherder.model.models import BugJobMap, Job


 @pytest.mark.parametrize(
-    'test_no_auth,test_duplicate_handling', [(True, False), (False, False), (False, True)]
+    "test_no_auth,test_duplicate_handling", [(True, False), (False, False), (False, True)]
 )
 def test_create_bug_job_map(
     client, test_job, test_user, bugs, test_no_auth, test_duplicate_handling
@@ -19,7 +19,7 @@ def test_create_bug_job_map(
     if not test_no_auth:
         client.force_authenticate(user=test_user)

-    submit_obj = {u"job_id": test_job.id, u"bug_id": bug.id, u"type": u"manual"}
+    submit_obj = {"job_id": test_job.id, "bug_id": bug.id, "type": "manual"}

     # if testing duplicate handling, submit twice
     if test_duplicate_handling:
@@ -40,8 +40,8 @@ def test_create_bug_job_map(
         assert BugJobMap.objects.count() == 1
         bug_job_map = BugJobMap.objects.first()

-        assert bug_job_map.job_id == submit_obj['job_id']
-        assert bug_job_map.bug_id == submit_obj['bug_id']
+        assert bug_job_map.job_id == submit_obj["job_id"]
+        assert bug_job_map.bug_id == submit_obj["bug_id"]
         assert bug_job_map.user == test_user


@@ -73,10 +73,10 @@ def test_bug_job_map_list(client, test_repository, eleven_jobs_stored, test_user
     for job_range in [(0, 1), (0, 2), (0, 9)]:
         resp = client.get(
             reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
-            data={'job_id': [job.id for job in jobs[job_range[0] : job_range[1]]]},
+            data={"job_id": [job.id for job in jobs[job_range[0] : job_range[1]]]},
         )
         assert resp.status_code == 200
-        buglist = sorted(resp.json(), key=lambda i: i['bug_id'])
+        buglist = sorted(resp.json(), key=lambda i: i["bug_id"])

         assert buglist == expected[job_range[0] : job_range[1]]

@@ -111,7 +111,7 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_us
     assert resp.json() == expected


-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
 def test_bug_job_map_delete(
     client, eleven_jobs_stored, test_repository, test_user, test_no_auth, bugs
 ):
@@ -153,8 +153,8 @@ def test_bug_job_map_bad_job_id(client, test_repository):

     resp = client.get(
         reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
-        data={'job_id': bad_job_id},
+        data={"job_id": bad_job_id},
     )

     assert resp.status_code == 400
-    assert resp.json() == {'message': 'Valid job_id required'}
+    assert resp.json() == {"message": "Valid job_id required"}

@@ -15,17 +15,17 @@ def test_create_bug(client, eleven_jobs_stored, activate_responses, test_user):
         headers = {}
         requestdata = json.loads(request.body)
         requestheaders = request.headers
-        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
-        assert requestdata['type'] == "defect"
-        assert requestdata['product'] == "Bugzilla"
-        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
-            test_user.email.replace('@', " [at] ")
+        assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+        assert requestdata["type"] == "defect"
+        assert requestdata["product"] == "Bugzilla"
+        assert requestdata["description"] == "**Filed by:** {}\nIntermittent Description".format(
+            test_user.email.replace("@", " [at] ")
         )
-        assert requestdata['component'] == "Administration"
-        assert requestdata['summary'] == u"Intermittent summary"
-        assert requestdata['comment_tags'] == "treeherder"
-        assert requestdata['version'] == "4.0.17"
-        assert requestdata['keywords'] == ["intermittent-failure"]
+        assert requestdata["component"] == "Administration"
+        assert requestdata["summary"] == "Intermittent summary"
+        assert requestdata["comment_tags"] == "treeherder"
+        assert requestdata["version"] == "4.0.17"
+        assert requestdata["keywords"] == ["intermittent-failure"]
         resp_body = {"id": 323}
         return (200, headers, json.dumps(resp_body))
@@ -44,17 +44,17 @@ def test_create_bug(client, eleven_jobs_stored, activate_responses, test_user):
             "type": "defect",
             "product": "Bugzilla",
             "component": "Administration",
-            "summary": u"Intermittent summary",
+            "summary": "Intermittent summary",
             "version": "4.0.17",
-            "comment": u"Intermittent Description",
+            "comment": "Intermittent Description",
             "comment_tags": "treeherder",
             "keywords": ["intermittent-failure"],
             "is_security_issue": False,
         },
     )
     assert resp.status_code == 200
-    assert resp.json()['id'] == 323
-    assert resp.json()['url'] == 'https://thisisnotbugzilla.org/show_bug.cgi?id=323'
+    assert resp.json()["id"] == 323
+    assert resp.json()["url"] == "https://thisisnotbugzilla.org/show_bug.cgi?id=323"


 def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses, test_user):
@@ -66,19 +66,19 @@ def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses,
         headers = {}
         requestdata = json.loads(request.body)
         requestheaders = request.headers
-        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
-        assert requestdata['type'] == "defect"
-        assert requestdata['product'] == "Bugzilla"
+        assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+        assert requestdata["type"] == "defect"
+        assert requestdata["product"] == "Bugzilla"
         assert requestdata[
-            'description'
-        ] == u"**Filed by:** {}\nIntermittent “description” string".format(
-            test_user.email.replace('@', " [at] ")
+            "description"
+        ] == "**Filed by:** {}\nIntermittent “description” string".format(
+            test_user.email.replace("@", " [at] ")
         )
-        assert requestdata['component'] == "Administration"
-        assert requestdata['summary'] == u"Intermittent “summary”"
-        assert requestdata['comment_tags'] == "treeherder"
-        assert requestdata['version'] == "4.0.17"
-        assert requestdata['keywords'] == ["intermittent-failure"]
+        assert requestdata["component"] == "Administration"
+        assert requestdata["summary"] == "Intermittent “summary”"
+        assert requestdata["comment_tags"] == "treeherder"
+        assert requestdata["version"] == "4.0.17"
+        assert requestdata["keywords"] == ["intermittent-failure"]
         resp_body = {"id": 323}
         return (200, headers, json.dumps(resp_body))
@@ -97,16 +97,16 @@ def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses,
             "type": "defect",
             "product": "Bugzilla",
             "component": "Administration",
-            "summary": u"Intermittent “summary”",
+            "summary": "Intermittent “summary”",
             "version": "4.0.17",
-            "comment": u"Intermittent “description” string",
+            "comment": "Intermittent “description” string",
             "comment_tags": "treeherder",
             "keywords": ["intermittent-failure"],
             "is_security_issue": False,
         },
     )
     assert resp.status_code == 200
-    assert resp.json()['id'] == 323
+    assert resp.json()["id"] == 323


 def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_user):
@@ -118,19 +118,19 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
         headers = {}
         requestdata = json.loads(request.body)
         requestheaders = request.headers
-        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
-        assert requestdata['type'] == "defect"
-        assert requestdata['product'] == "Bugzilla"
-        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
-            test_user.email.replace('@', " [at] ")
+        assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+        assert requestdata["type"] == "defect"
+        assert requestdata["product"] == "Bugzilla"
+        assert requestdata["description"] == "**Filed by:** {}\nIntermittent Description".format(
+            test_user.email.replace("@", " [at] ")
         )
-        assert requestdata['component'] == "Administration"
-        assert requestdata['summary'] == u"Intermittent summary"
-        assert requestdata['comment_tags'] == "treeherder"
-        assert requestdata['version'] == "4.0.17"
-        assert requestdata['keywords'] == ["intermittent-failure", "crash"]
-        assert requestdata['cf_crash_signature'] == "[@crashsig]"
-        assert requestdata['priority'] == '--'
+        assert requestdata["component"] == "Administration"
+        assert requestdata["summary"] == "Intermittent summary"
+        assert requestdata["comment_tags"] == "treeherder"
+        assert requestdata["version"] == "4.0.17"
+        assert requestdata["keywords"] == ["intermittent-failure", "crash"]
+        assert requestdata["cf_crash_signature"] == "[@crashsig]"
+        assert requestdata["priority"] == "--"
         resp_body = {"id": 323}
         return (200, headers, json.dumps(resp_body))
@@ -149,9 +149,9 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
             "type": "defect",
             "product": "Bugzilla",
             "component": "Administration",
-            "summary": u"Intermittent summary",
+            "summary": "Intermittent summary",
             "version": "4.0.17",
-            "comment": u"Intermittent Description",
+            "comment": "Intermittent Description",
             "comment_tags": "treeherder",
             "crash_signature": "[@crashsig]",
             "priority": "--",
@@ -160,7 +160,7 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
         },
     )
     assert resp.status_code == 200
-    assert resp.json()['id'] == 323
+    assert resp.json()["id"] == 323


 def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_responses):
@@ -172,16 +172,16 @@ def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_respons
         headers = {}
         requestdata = json.loads(request.body)
         requestheaders = request.headers
-        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
-        assert requestdata['type'] == "defect"
-        assert requestdata['product'] == "Bugzilla"
-        assert requestdata['description'] == u"**Filed by:** MyName\nIntermittent Description"
-        assert requestdata['component'] == "Administration"
-        assert requestdata['summary'] == u"Intermittent summary"
-        assert requestdata['comment_tags'] == "treeherder"
-        assert requestdata['version'] == "4.0.17"
-        assert requestdata['keywords'] == ["intermittent-failure"]
-        assert requestdata['see_also'] == "12345"
+        assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+        assert requestdata["type"] == "defect"
+        assert requestdata["product"] == "Bugzilla"
+        assert requestdata["description"] == "**Filed by:** MyName\nIntermittent Description"
+        assert requestdata["component"] == "Administration"
+        assert requestdata["summary"] == "Intermittent summary"
+        assert requestdata["comment_tags"] == "treeherder"
+        assert requestdata["version"] == "4.0.17"
+        assert requestdata["keywords"] == ["intermittent-failure"]
+        assert requestdata["see_also"] == "12345"
         resp_body = {"id": 323}
         return (200, headers, json.dumps(resp_body))
@@ -198,9 +198,9 @@ def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_respons
             "type": "defect",
             "product": "Bugzilla",
             "component": "Administration",
-            "summary": u"Intermittent summary",
+            "summary": "Intermittent summary",
             "version": "4.0.17",
-            "comment": u"Intermittent Description",
+            "comment": "Intermittent Description",
             "comment_tags": "treeherder",
             "keywords": ["intermittent-failure"],
             "see_also": "12345",
@@ -208,7 +208,7 @@ def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_respons
         },
     )
     assert resp.status_code == 403
-    assert resp.json()['detail'] == "Authentication credentials were not provided."
+    assert resp.json()["detail"] == "Authentication credentials were not provided."


 def test_create_bug_with_long_crash_signature(
@@ -222,18 +222,18 @@ def test_create_bug_with_long_crash_signature(
         headers = {}
         requestdata = json.loads(request.body)
         requestheaders = request.headers
-        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
-        assert requestdata['type'] == "defect"
-        assert requestdata['product'] == "Bugzilla"
-        assert requestdata['description'] == u"**Filed by:** MyName\nIntermittent Description"
-        assert requestdata['component'] == "Administration"
-        assert requestdata['summary'] == u"Intermittent summary"
-        assert requestdata['comment_tags'] == "treeherder"
-        assert requestdata['version'] == "4.0.17"
-        assert requestdata['keywords'] == ["intermittent-failure", "regression"]
-        assert requestdata['cf_crash_signature'] == "[@crashsig]"
-        assert requestdata['regressed_by'] == "123"
-        assert requestdata['see_also'] == "12345"
+        assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+        assert requestdata["type"] == "defect"
+        assert requestdata["product"] == "Bugzilla"
+        assert requestdata["description"] == "**Filed by:** MyName\nIntermittent Description"
+        assert requestdata["component"] == "Administration"
+        assert requestdata["summary"] == "Intermittent summary"
+        assert requestdata["comment_tags"] == "treeherder"
+        assert requestdata["version"] == "4.0.17"
+        assert requestdata["keywords"] == ["intermittent-failure", "regression"]
+        assert requestdata["cf_crash_signature"] == "[@crashsig]"
+        assert requestdata["regressed_by"] == "123"
+        assert requestdata["see_also"] == "12345"
         resp_body = {"id": 323}
         return (200, headers, json.dumps(resp_body))
@@ -246,16 +246,16 @@ def test_create_bug_with_long_crash_signature(

     client.force_authenticate(user=test_user)

-    crashsig = 'x' * 2050
+    crashsig = "x" * 2050
     resp = client.post(
         reverse("bugzilla-create-bug"),
         {
             "type": "defect",
             "product": "Bugzilla",
             "component": "Administration",
-            "summary": u"Intermittent summary",
+            "summary": "Intermittent summary",
             "version": "4.0.17",
-            "comment": u"Intermittent Description",
+            "comment": "Intermittent Description",
             "comment_tags": "treeherder",
             "keywords": ["intermittent-failure", "regression"],
             "crash_signature": crashsig,
@@ -265,4 +265,4 @@ def test_create_bug_with_long_crash_signature(
         },
     )
     assert resp.status_code == 400
-    assert resp.json()['failure'] == "Crash signature can't be more than 2048 characters."
+    assert resp.json()["failure"] == "Crash signature can't be more than 2048 characters."

@@ -6,28 +6,28 @@ from django.urls import reverse
 def test_valid_report(client):
     """Tests that a correctly formed CSP violation report is accepted when unauthenticated."""
     valid_report = {
-        'csp-report': {
-            'blocked-uri': 'https://treestatus.mozilla-releng.net/trees/autoland',
-            'document-uri': 'http://localhost:8000/',
-            'original-policy': '...',
-            'referrer': '',
-            'violated-directive': 'connect-src',
+        "csp-report": {
+            "blocked-uri": "https://treestatus.mozilla-releng.net/trees/autoland",
+            "document-uri": "http://localhost:8000/",
+            "original-policy": "...",
+            "referrer": "",
+            "violated-directive": "connect-src",
         }
     }
     response = client.post(
-        reverse('csp-report'),
+        reverse("csp-report"),
         data=json.dumps(valid_report),
-        content_type='application/csp-report',
+        content_type="application/csp-report",
     )
     assert response.status_code == 200


 def test_invalid_report(client):
     """Test that badly formed reports are gracefully handled."""
-    invalid_report = 'bad'
+    invalid_report = "bad"
     response = client.post(
-        reverse('csp-report'),
+        reverse("csp-report"),
         data=json.dumps(invalid_report),
-        content_type='application/csp-report',
+        content_type="application/csp-report",
     )
     assert response.status_code == 400

@@ -8,7 +8,7 @@ def test_future_date(group_data, client):

     today = datetime.datetime.today().date()
     tomorrow = today + datetime.timedelta(days=1)
-    url = reverse('groupsummary') + "?startdate=%s" % tomorrow
+    url = reverse("groupsummary") + "?startdate=%s" % tomorrow
     resp = client.get(url)
     assert resp.status_code == 200
     assert resp.json() == expected
@@ -18,7 +18,7 @@ def test_future_date(group_data, client):
 def test_default_date(group_data, client):
     expected = {"job_type_names": [], "manifests": []}

-    url = reverse('groupsummary')
+    url = reverse("groupsummary")
     resp = client.get(url)
     assert resp.status_code == 200
     assert resp.json() == expected
@@ -27,8 +27,8 @@ def test_default_date(group_data, client):
 # test data, summarized by manifest
 # test jobname chunk removal and aggregation
 def test_summarized(group_data, client):
-    expected = group_data['expected']
-    url = reverse('groupsummary') + "?startdate=%s" % str(group_data['date']).split(' ')[0]
+    expected = group_data["expected"]
+    url = reverse("groupsummary") + "?startdate=%s" % str(group_data["date"]).split(" ")[0]
     resp = client.get(url)
     assert resp.status_code == 200
     assert resp.json() == expected

@@ -4,9 +4,9 @@ from treeherder.model.models import BugJobMap


 def test_failures(bug_data, client):
-    expected = [{'bug_count': 1, 'bug_id': bug_data['bug_id']}]
+    expected = [{"bug_count": 1, "bug_id": bug_data["bug_id"]}]

-    resp = client.get(reverse('failures') + bug_data['query_string'])
+    resp = client.get(reverse("failures") + bug_data["query_string"])
     assert resp.status_code == 200
     assert resp.json() == expected

@@ -14,21 +14,21 @@ def test_failures(bug_data, client):
 def test_failures_by_bug(bug_data, client):
     expected = [
         {
-            'bug_id': bug_data['bug_id'],
-            'build_type': bug_data['option'].name,
-            'job_id': bug_data['job'].id,
-            'push_time': bug_data['job'].push.time.strftime('%Y-%m-%d %H:%M:%S'),
-            'platform': bug_data['job'].machine_platform.platform,
-            'revision': bug_data['job'].push.revision,
-            'test_suite': bug_data['job'].signature.job_type_name,
-            'tree': bug_data['job'].repository.name,
-            'machine_name': bug_data['job'].machine.name,
-            'lines': [],
+            "bug_id": bug_data["bug_id"],
+            "build_type": bug_data["option"].name,
+            "job_id": bug_data["job"].id,
+            "push_time": bug_data["job"].push.time.strftime("%Y-%m-%d %H:%M:%S"),
+            "platform": bug_data["job"].machine_platform.platform,
+            "revision": bug_data["job"].push.revision,
+            "test_suite": bug_data["job"].signature.job_type_name,
+            "tree": bug_data["job"].repository.name,
+            "machine_name": bug_data["job"].machine.name,
+            "lines": [],
         }
     ]

     resp = client.get(
-        reverse('failures-by-bug') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
+        reverse("failures-by-bug") + bug_data["query_string"] + "&bug={}".format(bug_data["bug_id"])
     )
     assert resp.status_code == 200
     assert resp.json() == expected
@@ -40,20 +40,20 @@ def test_failure_count_by_bug(bug_data, client, test_run_data):

     for bug in bugs:
         if (
-            bug.job.repository.name == bug_data['tree']
-            and bug.bug_id == bug_data['bug_id']
-            and bug.job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
+            bug.job.repository.name == bug_data["tree"]
+            and bug.bug_id == bug_data["bug_id"]
+            and bug.job.push.time.strftime("%Y-%m-%d") == test_run_data["push_time"]
         ):
             failure_count += 1

     expected = {
-        'date': test_run_data['push_time'],
-        'test_runs': test_run_data['test_runs'],
-        'failure_count': failure_count,
+        "date": test_run_data["push_time"],
+        "test_runs": test_run_data["test_runs"],
+        "failure_count": failure_count,
     }

     resp = client.get(
-        reverse('failure-count') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
+        reverse("failure-count") + bug_data["query_string"] + "&bug={}".format(bug_data["bug_id"])
     )
     assert resp.status_code == 200
     assert resp.json()[0] == expected
@@ -62,20 +62,20 @@ def test_failure_count_by_bug(bug_data, client, test_run_data):
 def test_failure_count(bug_data, client, test_run_data):
     failure_count = 0

-    for job in list(bug_data['jobs']):
+    for job in list(bug_data["jobs"]):
         if (
-            job.repository.name == bug_data['tree']
+            job.repository.name == bug_data["tree"]
             and job.failure_classification_id == 4
-            and job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
+            and job.push.time.strftime("%Y-%m-%d") == test_run_data["push_time"]
         ):
             failure_count += 1

     expected = {
-        'date': test_run_data['push_time'],
-        'test_runs': test_run_data['test_runs'],
-        'failure_count': failure_count,
+        "date": test_run_data["push_time"],
+        "test_runs": test_run_data["test_runs"],
+        "failure_count": failure_count,
     }

-    resp = client.get(reverse('failure-count') + bug_data['query_string'])
+    resp = client.get(reverse("failure-count") + bug_data["query_string"])
     assert resp.status_code == 200
     assert resp.json()[0] == expected

@@ -7,23 +7,23 @@ from treeherder.model.models import JobLog
 def test_get_job_log_urls(
     test_repository, push_stored, failure_classifications, generic_reference_data, client
 ):
-    job1 = create_generic_job('1234', test_repository, 1, generic_reference_data)
-    job2 = create_generic_job('5678', test_repository, 1, generic_reference_data)
+    job1 = create_generic_job("1234", test_repository, 1, generic_reference_data)
+    job2 = create_generic_job("5678", test_repository, 1, generic_reference_data)

     JobLog.objects.create(
-        job=job1, name='test_log_1', url='http://google.com', status=JobLog.PENDING
+        job=job1, name="test_log_1", url="http://google.com", status=JobLog.PENDING
     )
-    JobLog.objects.create(job=job1, name='test_log_2', url='http://yahoo.com', status=JobLog.PARSED)
-    JobLog.objects.create(job=job2, name='test_log_3', url='http://yahoo.com', status=JobLog.PARSED)
+    JobLog.objects.create(job=job1, name="test_log_2", url="http://yahoo.com", status=JobLog.PARSED)
+    JobLog.objects.create(job=job2, name="test_log_3", url="http://yahoo.com", status=JobLog.PARSED)

     resp = client.get(
-        reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1'
+        reverse("job-log-url-list", kwargs={"project": test_repository.name}) + "?job_id=1"
     )
     assert resp.status_code == 200
     assert len(resp.json()) == 2

     resp = client.get(
-        reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1&job_id=2'
+        reverse("job-log-url-list", kwargs={"project": test_repository.name}) + "?job_id=1&job_id=2"
     )
     assert resp.status_code == 200
     assert len(resp.json()) == 3

@@ -9,7 +9,7 @@ from treeherder.model.models import Job, TextLogError


 @pytest.mark.parametrize(
-    ('offset', 'count', 'expected_num'),
+    ("offset", "count", "expected_num"),
     [(None, None, 10), (None, 5, 5), (5, None, 6), (0, 5, 5), (10, 10, 1)],
 )
 def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, expected_num):
@@ -18,11 +18,11 @@ def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, ex
     endpoint.
     """
     url = reverse("jobs-list", kwargs={"project": test_repository.name})
-    params = '&'.join(
-        ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
+    params = "&".join(
+        ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
     )
     if params:
-        url += '?{}'.format(params)
+        url += "?{}".format(params)
     resp = client.get(url)
     assert resp.status_code == 200
     response_dict = resp.json()
@@ -91,47 +91,47 @@ def test_job_list_equals_filter(client, eleven_jobs_stored, test_repository):

     resp = client.get(final_url)
     assert resp.status_code == 200
-    assert len(resp.json()['results']) == 1
+    assert len(resp.json()["results"]) == 1


 job_filter_values = [
-    (u'build_architecture', u'x86_64'),
-    (u'build_os', u'mac'),
-    (u'build_platform', u'osx-10-7'),
-    (u'build_platform_id', 3),
-    (u'build_system_type', u'buildbot'),
-    (u'end_timestamp', 1384364849),
-    (u'failure_classification_id', 1),
-    (u'id', 4),
-    (u'job_group_id', 2),
-    (u'job_group_name', u'Mochitest'),
-    (u'job_group_symbol', u'M'),
-    (u'job_guid', u'ab952a4bbbc74f1d9fb3cf536073b371029dbd02'),
-    (u'job_type_id', 2),
-    (u'job_type_name', u'Mochitest Browser Chrome'),
-    (u'job_type_symbol', u'bc'),
-    (u'machine_name', u'talos-r4-lion-011'),
-    (u'machine_platform_architecture', u'x86_64'),
-    (u'machine_platform_os', u'mac'),
-    (u'option_collection_hash', u'32faaecac742100f7753f0c1d0aa0add01b4046b'),
-    (u'platform', u'osx-10-7'),
-    (u'reason', u'scheduler'),
+    ("build_architecture", "x86_64"),
+    ("build_os", "mac"),
+    ("build_platform", "osx-10-7"),
+    ("build_platform_id", 3),
+    ("build_system_type", "buildbot"),
+    ("end_timestamp", 1384364849),
+    ("failure_classification_id", 1),
+    ("id", 4),
+    ("job_group_id", 2),
+    ("job_group_name", "Mochitest"),
+    ("job_group_symbol", "M"),
+    ("job_guid", "ab952a4bbbc74f1d9fb3cf536073b371029dbd02"),
+    ("job_type_id", 2),
+    ("job_type_name", "Mochitest Browser Chrome"),
+    ("job_type_symbol", "bc"),
+    ("machine_name", "talos-r4-lion-011"),
+    ("machine_platform_architecture", "x86_64"),
+    ("machine_platform_os", "mac"),
+    ("option_collection_hash", "32faaecac742100f7753f0c1d0aa0add01b4046b"),
+    ("platform", "osx-10-7"),
+    ("reason", "scheduler"),
     (
-        u'ref_data_name',
-        u'Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome',
+        "ref_data_name",
+        "Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome",
     ),
-    (u'result', u'success'),
-    (u'result_set_id', 4),
-    (u'signature', u'b4a4be709b937853b4ea1a49fc21bf43bf6d6406'),
-    (u'start_timestamp', 1384356880),
-    (u'state', u'completed'),
-    (u'submit_timestamp', 1384356854),
-    (u'tier', 1),
-    (u'who', u'tests-mozilla-release-lion-debug-unittest'),
+    ("result", "success"),
+    ("result_set_id", 4),
+    ("signature", "b4a4be709b937853b4ea1a49fc21bf43bf6d6406"),
+    ("start_timestamp", 1384356880),
+    ("state", "completed"),
+    ("submit_timestamp", 1384356854),
+    ("tier", 1),
+    ("who", "tests-mozilla-release-lion-debug-unittest"),
 ]


-@pytest.mark.parametrize(('fieldname', 'expected'), job_filter_values)
+@pytest.mark.parametrize(("fieldname", "expected"), job_filter_values)
 def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fieldname, expected):
     """
     test retrieving a job list with a querystring filter.
@@ -146,7 +146,7 @@ def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fie
     final_url = url + "?{}={}".format(fieldname, expected)
     resp = client.get(final_url)
     assert resp.status_code == 200
-    first = resp.json()['results'][0]
+    first = resp.json()["results"][0]
     assert first[fieldname] == expected


@@ -163,7 +163,7 @@ def test_job_list_in_filter(client, eleven_jobs_stored, test_repository):

     resp = client.get(final_url)
     assert resp.status_code == 200
-    assert len(resp.json()['results']) == 2
+    assert len(resp.json()["results"]) == 2


 def test_job_detail(client, test_job):
@@ -183,7 +183,7 @@ def test_job_detail(client, test_job):
     )
     assert resp.status_code == 200
     assert resp.json()["taskcluster_metadata"] == {
-        "task_id": 'V3SVuxO8TFy37En_6HcXLs',
+        "task_id": "V3SVuxO8TFy37En_6HcXLs",
         "retry_id": 0,
     }

@@ -210,8 +210,8 @@ def test_job_detail_not_found(client, test_repository):


 def test_text_log_errors(client, test_job):
-    TextLogError.objects.create(job=test_job, line='failure 1', line_number=101)
-    TextLogError.objects.create(job=test_job, line='failure 2', line_number=102)
+    TextLogError.objects.create(job=test_job, line="failure 1", line_number=101)
+    TextLogError.objects.create(job=test_job, line="failure 2", line_number=102)
     resp = client.get(
         reverse(
             "jobs-text-log-errors", kwargs={"project": test_job.repository.name, "pk": test_job.id}
@@ -220,22 +220,22 @@ def test_text_log_errors(client, test_job):
     assert resp.status_code == 200
     assert resp.json() == [
         {
-            'id': 1,
-            'job': 1,
-            'line': 'failure 1',
-            'line_number': 101,
+            "id": 1,
+            "job": 1,
+            "line": "failure 1",
+            "line_number": 101,
         },
         {
-            'id': 2,
-            'job': 1,
-            'line': 'failure 2',
-            'line_number': 102,
+            "id": 2,
+            "job": 1,
+            "line": "failure 2",
+            "line_number": 102,
         },
     ]


 @pytest.mark.parametrize(
-    ('offset', 'count', 'expected_num'),
+    ("offset", "count", "expected_num"),
     [(None, None, 3), (None, 2, 2), (1, None, 2), (0, 1, 1), (2, 10, 1)],
 )
 def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_num):
@@ -245,26 +245,26 @@ def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_n
     job = Job.objects.get(id=1)

     url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
-    params = '&'.join(
-        ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
+    params = "&".join(
+        ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
     )
     if params:
-        url += '?{}'.format(params)
+        url += "?{}".format(params)
     resp = client.get(url)

     assert resp.status_code == 200

     similar_jobs = resp.json()

-    assert 'results' in similar_jobs
+    assert "results" in similar_jobs

-    assert isinstance(similar_jobs['results'], list)
+    assert isinstance(similar_jobs["results"], list)

-    assert len(similar_jobs['results']) == expected_num
+    assert len(similar_jobs["results"]) == expected_num


 @pytest.mark.parametrize(
-    'lm_key,lm_value,exp_status, exp_job_count',
+    "lm_key,lm_value,exp_status, exp_job_count",
     [
         ("last_modified__gt", "2016-07-18T22:16:58.000", 200, 8),
         ("last_modified__lt", "2016-07-18T22:16:58.000", 200, 3),

@@ -75,7 +75,7 @@ def test_note_detail_bad_project(client, test_repository):
     assert resp.status_code == 404


-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
 def test_create_note(client, test_job, test_user, test_no_auth):
     """
     test creating a single note via endpoint when authenticated
@@ -100,23 +100,23 @@ def test_create_note(client, test_job, test_user, test_no_auth):
     assert resp.status_code == 200

     content = json.loads(resp.content)
-    assert content['message'] == 'note stored for job %s' % test_job.id
+    assert content["message"] == "note stored for job %s" % test_job.id

     note_list = JobNote.objects.filter(job=test_job)

     assert len(note_list) == 1
     assert note_list[0].user == test_user
     assert note_list[0].failure_classification.id == 2
-    assert note_list[0].text == 'you look like a man-o-lantern'
+    assert note_list[0].text == "you look like a man-o-lantern"

     # verify that the job's last_modified field got updated
     old_last_modified = test_job.last_modified
-    assert old_last_modified < Job.objects.values_list('last_modified', flat=True).get(
+    assert old_last_modified < Job.objects.values_list("last_modified", flat=True).get(
         id=test_job.id
     )


-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
 def test_delete_note(client, test_job_with_notes, test_repository, test_sheriff, test_no_auth):
     """
     test deleting a single note via endpoint
@@ -169,16 +169,16 @@ def test_push_notes(client, test_job_with_notes):
             "text": "you look like a man-o-lantern",
         },
         {
-            'failure_classification_name': 'expected fail',
-            'id': 2,
-            'job': {
-                'duration': 191,
-                'job_type_name': 'B2G Emulator Image Build',
-                'result': 'success',
+            "failure_classification_name": "expected fail",
+            "id": 2,
+            "job": {
+                "duration": 191,
+                "job_type_name": "B2G Emulator Image Build",
+                "result": "success",
                 "task_id": notes[1].job.taskcluster_metadata.task_id,
             },
             "who": notes[1].user.email,
             "created": notes[1].created.isoformat(),
-            'text': 'you look like a man-o-lantern',
+            "text": "you look like a man-o-lantern",
         },
     ]

@@ -2,7 +2,7 @@ from django.urls import reverse


 def test_option_collection_list(client, sample_option_collections):
-    resp = client.get(reverse("optioncollectionhash-list") + '?')
+    resp = client.get(reverse("optioncollectionhash-list") + "?")
     assert resp.status_code == 200

     response = resp.json()
@@ -11,6 +11,6 @@ def test_option_collection_list(client, sample_option_collections):

     assert len(response) == 2
     assert response == [
-        {'option_collection_hash': 'option_hash1', 'options': [{'name': 'opt1'}]},
-        {'option_collection_hash': 'option_hash2', 'options': [{'name': 'opt2'}]},
+        {"option_collection_hash": "option_hash1", "options": [{"name": "opt1"}]},
+        {"option_collection_hash": "option_hash2", "options": [{"name": "opt2"}]},
     ]

@ -27,7 +27,7 @@ def test_perfcompare_results_against_no_base(
|
|||
test_linux_platform,
|
||||
test_option_collection,
|
||||
):
|
||||
perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
|
||||
perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
|
||||
|
||||
test_perfcomp_push.time = THREE_DAYS_AGO
|
||||
test_perfcomp_push.repository = try_repository
|
||||
|
@ -35,15 +35,15 @@ def test_perfcompare_results_against_no_base(
|
|||
test_perfcomp_push_2.time = datetime.datetime.now()
|
||||
test_perfcomp_push_2.save()
|
||||
|
||||
suite = 'a11yr'
|
||||
test = 'dhtml.html'
|
||||
extra_options = 'e10s fission stylo webrender'
|
||||
measurement_unit = 'ms'
|
||||
base_application = 'firefox'
|
||||
new_application = 'geckoview'
|
||||
suite = "a11yr"
|
||||
test = "dhtml.html"
|
||||
extra_options = "e10s fission stylo webrender"
|
||||
measurement_unit = "ms"
|
||||
base_application = "firefox"
|
||||
new_application = "geckoview"
|
||||
|
||||
base_sig = create_signature(
|
||||
signature_hash=(20 * 't1'),
|
||||
signature_hash=(20 * "t1"),
|
||||
extra_options=extra_options,
|
||||
platform=test_linux_platform,
|
||||
measurement_unit=measurement_unit,
|
||||
|
@ -72,7 +72,7 @@ def test_perfcompare_results_against_no_base(
|
|||
perf_datum.push.save()
|
||||
|
||||
new_sig = create_signature(
|
||||
signature_hash=(20 * 't2'),
|
||||
signature_hash=(20 * "t2"),
|
||||
extra_options=extra_options,
|
||||
platform=test_linux_platform,
|
||||
measurement_unit=measurement_unit,
|
||||
|
@ -103,59 +103,59 @@ def test_perfcompare_results_against_no_base(
|
|||
|
||||
expected = [
|
||||
{
|
||||
'base_rev': None,
|
||||
'new_rev': test_perfcomp_push_2.revision,
|
||||
'framework_id': base_sig.framework.id,
|
||||
'platform': base_sig.platform.platform,
|
||||
'suite': base_sig.suite,
|
||||
'is_empty': False,
|
||||
'header_name': response['header_name'],
|
||||
'base_repository_name': base_sig.repository.name,
|
||||
'new_repository_name': new_sig.repository.name,
|
||||
'base_app': 'firefox',
|
||||
'new_app': 'geckoview',
|
||||
'is_complete': response['is_complete'],
|
||||
'base_measurement_unit': base_sig.measurement_unit,
|
||||
'new_measurement_unit': new_sig.measurement_unit,
|
||||
'base_retriggerable_job_ids': [1],
|
||||
'new_retriggerable_job_ids': [4],
|
||||
'base_runs': base_perf_data_values,
|
||||
'new_runs': new_perf_data_values,
|
||||
'base_avg_value': round(response['base_avg_value'], 2),
|
||||
'new_avg_value': round(response['new_avg_value'], 2),
|
||||
'base_median_value': round(response['base_median_value'], 2),
|
||||
'new_median_value': round(response['new_median_value'], 2),
|
||||
'test': base_sig.test,
|
||||
'option_name': response['option_name'],
|
||||
'extra_options': base_sig.extra_options,
|
||||
'base_stddev': round(response['base_stddev'], 2),
|
||||
'new_stddev': round(response['new_stddev'], 2),
|
||||
'base_stddev_pct': round(response['base_stddev_pct'], 2),
|
||||
'new_stddev_pct': round(response['new_stddev_pct'], 2),
|
||||
'confidence': round(response['confidence'], 2),
|
||||
'confidence_text': response['confidence_text'],
|
||||
'delta_value': round(response['delta_value'], 2),
|
||||
'delta_percentage': round(response['delta_pct'], 2),
|
||||
'magnitude': round(response['magnitude'], 2),
|
||||
'new_is_better': response['new_is_better'],
|
||||
'lower_is_better': response['lower_is_better'],
|
||||
'is_confident': response['is_confident'],
|
||||
'more_runs_are_needed': response['more_runs_are_needed'],
|
||||
'noise_metric': False,
|
||||
'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?'
|
||||
f'highlightedRevisions={test_perfcomp_push_2.revision}&'
|
||||
f'series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
|
||||
f'series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
|
||||
f'timerange=86400',
|
||||
'is_improvement': response['is_improvement'],
|
||||
'is_regression': response['is_regression'],
|
||||
'is_meaningful': response['is_meaningful'],
|
||||
"base_rev": None,
|
||||
"new_rev": test_perfcomp_push_2.revision,
|
||||
"framework_id": base_sig.framework.id,
|
||||
"platform": base_sig.platform.platform,
|
||||
"suite": base_sig.suite,
|
||||
"is_empty": False,
|
||||
"header_name": response["header_name"],
|
||||
"base_repository_name": base_sig.repository.name,
|
||||
"new_repository_name": new_sig.repository.name,
|
||||
"base_app": "firefox",
|
||||
"new_app": "geckoview",
|
||||
"is_complete": response["is_complete"],
|
||||
"base_measurement_unit": base_sig.measurement_unit,
|
||||
"new_measurement_unit": new_sig.measurement_unit,
|
||||
"base_retriggerable_job_ids": [1],
|
||||
"new_retriggerable_job_ids": [4],
|
||||
"base_runs": base_perf_data_values,
|
||||
"new_runs": new_perf_data_values,
|
||||
"base_avg_value": round(response["base_avg_value"], 2),
|
||||
"new_avg_value": round(response["new_avg_value"], 2),
|
||||
"base_median_value": round(response["base_median_value"], 2),
|
||||
"new_median_value": round(response["new_median_value"], 2),
|
||||
"test": base_sig.test,
|
||||
"option_name": response["option_name"],
|
||||
"extra_options": base_sig.extra_options,
|
||||
"base_stddev": round(response["base_stddev"], 2),
|
||||
"new_stddev": round(response["new_stddev"], 2),
|
||||
"base_stddev_pct": round(response["base_stddev_pct"], 2),
|
||||
"new_stddev_pct": round(response["new_stddev_pct"], 2),
|
||||
"confidence": round(response["confidence"], 2),
|
||||
"confidence_text": response["confidence_text"],
|
||||
"delta_value": round(response["delta_value"], 2),
|
||||
"delta_percentage": round(response["delta_pct"], 2),
|
||||
"magnitude": round(response["magnitude"], 2),
|
||||
"new_is_better": response["new_is_better"],
|
||||
"lower_is_better": response["lower_is_better"],
|
||||
"is_confident": response["is_confident"],
|
||||
"more_runs_are_needed": response["more_runs_are_needed"],
|
||||
"noise_metric": False,
|
||||
"graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?"
|
||||
f"highlightedRevisions={test_perfcomp_push_2.revision}&"
|
||||
f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
|
||||
f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
|
||||
f"timerange=86400",
|
||||
"is_improvement": response["is_improvement"],
|
||||
"is_regression": response["is_regression"],
|
||||
"is_meaningful": response["is_meaningful"],
|
||||
},
|
||||
]
|
||||
|
||||
query_params = (
|
||||
'?base_repository={}&new_repository={}&new_revision={}&framework={'
|
||||
'}&interval={}&no_subtests=true'.format(
|
||||
"?base_repository={}&new_repository={}&new_revision={}&framework={"
|
||||
"}&interval={}&no_subtests=true".format(
|
||||
try_repository.name,
|
||||
test_repository.name,
|
||||
test_perfcomp_push_2.revision,
|
||||
|
@ -164,7 +164,7 @@ def test_perfcompare_results_against_no_base(
|
|||
)
|
||||
)
|
||||
|
||||
response = client.get(reverse('perfcompare-results') + query_params)
|
||||
response = client.get(reverse("perfcompare-results") + query_params)
|
||||
|
||||
assert response.status_code == 200
|
||||
assert expected[0] == response.json()[0]
|
||||
|
@ -183,7 +183,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
|
|||
test_linux_platform,
|
||||
test_option_collection,
|
||||
):
|
||||
perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
|
||||
perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
|
||||
|
||||
test_perfcomp_push.time = THREE_DAYS_AGO
|
||||
test_perfcomp_push.repository = try_repository
|
||||
|
@ -191,15 +191,15 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
|
|||
test_perfcomp_push_2.time = datetime.datetime.now()
|
||||
test_perfcomp_push_2.save()
|
||||
|
||||
suite = 'a11yr'
|
||||
test = 'dhtml.html'
|
||||
extra_options = 'e10s fission stylo webrender'
|
||||
measurement_unit = 'ms'
|
||||
base_application = 'firefox'
|
||||
new_application = 'geckoview'
|
||||
suite = "a11yr"
|
||||
test = "dhtml.html"
|
||||
extra_options = "e10s fission stylo webrender"
|
||||
measurement_unit = "ms"
|
||||
base_application = "firefox"
|
||||
new_application = "geckoview"
|
||||
|
||||
base_sig = create_signature(
|
||||
signature_hash=(20 * 't1'),
|
||||
signature_hash=(20 * "t1"),
|
||||
extra_options=extra_options,
|
||||
platform=test_linux_platform,
|
||||
measurement_unit=measurement_unit,
|
||||
|
@ -228,7 +228,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
|
|||
perf_datum.push.save()
|
||||
|
||||
new_sig = create_signature(
|
||||
signature_hash=(20 * 't2'),
|
||||
signature_hash=(20 * "t2"),
|
||||
extra_options=extra_options,
|
||||
platform=test_linux_platform,
|
||||
measurement_unit=measurement_unit,
|
||||
|
@@ -259,59 +259,59 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(

     expected = [
         {
-            'base_rev': test_perfcomp_push.revision,
-            'new_rev': test_perfcomp_push_2.revision,
-            'framework_id': base_sig.framework.id,
-            'platform': base_sig.platform.platform,
-            'suite': base_sig.suite,
-            'is_empty': False,
-            'header_name': response['header_name'],
-            'base_repository_name': base_sig.repository.name,
-            'new_repository_name': new_sig.repository.name,
-            'base_app': 'firefox',
-            'new_app': 'geckoview',
-            'is_complete': response['is_complete'],
-            'base_measurement_unit': base_sig.measurement_unit,
-            'new_measurement_unit': new_sig.measurement_unit,
-            'base_retriggerable_job_ids': [1],
-            'new_retriggerable_job_ids': [4],
-            'base_runs': base_perf_data_values,
-            'new_runs': new_perf_data_values,
-            'base_avg_value': round(response['base_avg_value'], 2),
-            'new_avg_value': round(response['new_avg_value'], 2),
-            'base_median_value': round(response['base_median_value'], 2),
-            'new_median_value': round(response['new_median_value'], 2),
-            'test': base_sig.test,
-            'option_name': response['option_name'],
-            'extra_options': base_sig.extra_options,
-            'base_stddev': round(response['base_stddev'], 2),
-            'new_stddev': round(response['new_stddev'], 2),
-            'base_stddev_pct': round(response['base_stddev_pct'], 2),
-            'new_stddev_pct': round(response['new_stddev_pct'], 2),
-            'confidence': round(response['confidence'], 2),
-            'confidence_text': response['confidence_text'],
-            'delta_value': round(response['delta_value'], 2),
-            'delta_percentage': round(response['delta_pct'], 2),
-            'magnitude': round(response['magnitude'], 2),
-            'new_is_better': response['new_is_better'],
-            'lower_is_better': response['lower_is_better'],
-            'is_confident': response['is_confident'],
-            'more_runs_are_needed': response['more_runs_are_needed'],
-            'noise_metric': False,
-            'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
-            f'highlightedRevisions={test_perfcomp_push_2.revision}&'
-            f'series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
-            f'series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
-            f'timerange=604800',
-            'is_improvement': response['is_improvement'],
-            'is_regression': response['is_regression'],
-            'is_meaningful': response['is_meaningful'],
+            "base_rev": test_perfcomp_push.revision,
+            "new_rev": test_perfcomp_push_2.revision,
+            "framework_id": base_sig.framework.id,
+            "platform": base_sig.platform.platform,
+            "suite": base_sig.suite,
+            "is_empty": False,
+            "header_name": response["header_name"],
+            "base_repository_name": base_sig.repository.name,
+            "new_repository_name": new_sig.repository.name,
+            "base_app": "firefox",
+            "new_app": "geckoview",
+            "is_complete": response["is_complete"],
+            "base_measurement_unit": base_sig.measurement_unit,
+            "new_measurement_unit": new_sig.measurement_unit,
+            "base_retriggerable_job_ids": [1],
+            "new_retriggerable_job_ids": [4],
+            "base_runs": base_perf_data_values,
+            "new_runs": new_perf_data_values,
+            "base_avg_value": round(response["base_avg_value"], 2),
+            "new_avg_value": round(response["new_avg_value"], 2),
+            "base_median_value": round(response["base_median_value"], 2),
+            "new_median_value": round(response["new_median_value"], 2),
+            "test": base_sig.test,
+            "option_name": response["option_name"],
+            "extra_options": base_sig.extra_options,
+            "base_stddev": round(response["base_stddev"], 2),
+            "new_stddev": round(response["new_stddev"], 2),
+            "base_stddev_pct": round(response["base_stddev_pct"], 2),
+            "new_stddev_pct": round(response["new_stddev_pct"], 2),
+            "confidence": round(response["confidence"], 2),
+            "confidence_text": response["confidence_text"],
+            "delta_value": round(response["delta_value"], 2),
+            "delta_percentage": round(response["delta_pct"], 2),
+            "magnitude": round(response["magnitude"], 2),
+            "new_is_better": response["new_is_better"],
+            "lower_is_better": response["lower_is_better"],
+            "is_confident": response["is_confident"],
+            "more_runs_are_needed": response["more_runs_are_needed"],
+            "noise_metric": False,
+            "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+            f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+            f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+            f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+            f"timerange=604800",
+            "is_improvement": response["is_improvement"],
+            "is_regression": response["is_regression"],
+            "is_meaningful": response["is_meaningful"],
         },
     ]

     query_params = (
-        '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
-        '}&no_subtests=true'.format(
+        "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+        "}&no_subtests=true".format(
             try_repository.name,
             test_repository.name,
             test_perfcomp_push.revision,

@@ -320,7 +320,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
         )
     )

-    response = client.get(reverse('perfcompare-results') + query_params)
+    response = client.get(reverse("perfcompare-results") + query_params)

     assert response.status_code == 200
     assert expected[0] == response.json()[0]
@@ -340,19 +340,19 @@ def test_perfcompare_results_multiple_runs(
     test_macosx_platform,
     test_option_collection,
 ):
-    perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
+    perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()

     test_perfcomp_push.time = SEVEN_DAYS_AGO
     test_perfcomp_push.save()
     test_perfcomp_push_2.time = datetime.datetime.now()
     test_perfcomp_push_2.save()

-    suite = 'a11yr'
-    test = 'dhtml.html'
-    extra_options = 'e10s fission stylo webrender'
-    measurement_unit = 'ms'
+    suite = "a11yr"
+    test = "dhtml.html"
+    extra_options = "e10s fission stylo webrender"
+    measurement_unit = "ms"

     sig1 = create_signature(
-        signature_hash=(20 * 't1'),
+        signature_hash=(20 * "t1"),
         extra_options=extra_options,
         platform=test_linux_platform,
         measurement_unit=measurement_unit,
@@ -371,7 +371,7 @@ def test_perfcompare_results_multiple_runs(
     create_perf_datum(index, job, test_perfcomp_push, sig1, sig1_val)

     sig2 = create_signature(
-        signature_hash=(20 * 't2'),
+        signature_hash=(20 * "t2"),
         extra_options=extra_options,
         platform=test_linux_platform,
         measurement_unit=measurement_unit,

@@ -385,7 +385,7 @@ def test_perfcompare_results_multiple_runs(
     create_perf_datum(index, job, test_perfcomp_push_2, sig2, sig2_val)

     sig3 = create_signature(
-        signature_hash=(20 * 't3'),
+        signature_hash=(20 * "t3"),
         extra_options=extra_options,
         platform=test_macosx_platform,
         measurement_unit=measurement_unit,

@@ -399,7 +399,7 @@ def test_perfcompare_results_multiple_runs(
     create_perf_datum(index, job, test_perfcomp_push, sig3, sig3_val)

     sig4 = create_signature(
-        signature_hash=(20 * 't4'),
+        signature_hash=(20 * "t4"),
         extra_options=extra_options,
         platform=test_macosx_platform,
         measurement_unit=measurement_unit,
@@ -418,104 +418,104 @@ def test_perfcompare_results_multiple_runs(

     expected = [
         {
-            'base_rev': test_perfcomp_push.revision,
-            'new_rev': test_perfcomp_push_2.revision,
-            'framework_id': sig1.framework.id,
-            'platform': sig1.platform.platform,
-            'suite': sig1.suite,
-            'is_empty': False,
-            'header_name': first_row['header_name'],
-            'base_repository_name': sig1.repository.name,
-            'new_repository_name': sig2.repository.name,
-            'base_app': '',
-            'new_app': '',
-            'is_complete': first_row['is_complete'],
-            'base_measurement_unit': sig1.measurement_unit,
-            'new_measurement_unit': sig2.measurement_unit,
-            'base_retriggerable_job_ids': [1, 2, 4],
-            'new_retriggerable_job_ids': [7, 8],
-            'base_runs': sig1_val,
-            'new_runs': sig2_val,
-            'base_avg_value': round(first_row['base_avg_value'], 2),
-            'new_avg_value': round(first_row['new_avg_value'], 2),
-            'base_median_value': round(first_row['base_median_value'], 2),
-            'new_median_value': round(first_row['new_median_value'], 2),
-            'test': sig1.test,
-            'option_name': first_row['option_name'],
-            'extra_options': sig1.extra_options,
-            'base_stddev': round(first_row['base_stddev'], 2),
-            'new_stddev': round(first_row['new_stddev'], 2),
-            'base_stddev_pct': round(first_row['base_stddev_pct'], 2),
-            'new_stddev_pct': round(first_row['new_stddev_pct'], 2),
-            'confidence': round(first_row['confidence'], 2),
-            'confidence_text': first_row['confidence_text'],
-            'delta_value': round(first_row['delta_value'], 2),
-            'delta_percentage': round(first_row['delta_pct'], 2),
-            'magnitude': round(first_row['magnitude'], 2),
-            'new_is_better': first_row['new_is_better'],
-            'lower_is_better': first_row['lower_is_better'],
-            'is_confident': first_row['is_confident'],
-            'more_runs_are_needed': first_row['more_runs_are_needed'],
-            'noise_metric': False,
-            'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
-            f'highlightedRevisions={test_perfcomp_push_2.revision}&'
-            f'series={test_repository.name}%2C{sig1.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600',
-            'is_improvement': first_row['is_improvement'],
-            'is_regression': first_row['is_regression'],
-            'is_meaningful': first_row['is_meaningful'],
+            "base_rev": test_perfcomp_push.revision,
+            "new_rev": test_perfcomp_push_2.revision,
+            "framework_id": sig1.framework.id,
+            "platform": sig1.platform.platform,
+            "suite": sig1.suite,
+            "is_empty": False,
+            "header_name": first_row["header_name"],
+            "base_repository_name": sig1.repository.name,
+            "new_repository_name": sig2.repository.name,
+            "base_app": "",
+            "new_app": "",
+            "is_complete": first_row["is_complete"],
+            "base_measurement_unit": sig1.measurement_unit,
+            "new_measurement_unit": sig2.measurement_unit,
+            "base_retriggerable_job_ids": [1, 2, 4],
+            "new_retriggerable_job_ids": [7, 8],
+            "base_runs": sig1_val,
+            "new_runs": sig2_val,
+            "base_avg_value": round(first_row["base_avg_value"], 2),
+            "new_avg_value": round(first_row["new_avg_value"], 2),
+            "base_median_value": round(first_row["base_median_value"], 2),
+            "new_median_value": round(first_row["new_median_value"], 2),
+            "test": sig1.test,
+            "option_name": first_row["option_name"],
+            "extra_options": sig1.extra_options,
+            "base_stddev": round(first_row["base_stddev"], 2),
+            "new_stddev": round(first_row["new_stddev"], 2),
+            "base_stddev_pct": round(first_row["base_stddev_pct"], 2),
+            "new_stddev_pct": round(first_row["new_stddev_pct"], 2),
+            "confidence": round(first_row["confidence"], 2),
+            "confidence_text": first_row["confidence_text"],
+            "delta_value": round(first_row["delta_value"], 2),
+            "delta_percentage": round(first_row["delta_pct"], 2),
+            "magnitude": round(first_row["magnitude"], 2),
+            "new_is_better": first_row["new_is_better"],
+            "lower_is_better": first_row["lower_is_better"],
+            "is_confident": first_row["is_confident"],
+            "more_runs_are_needed": first_row["more_runs_are_needed"],
+            "noise_metric": False,
+            "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+            f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+            f"series={test_repository.name}%2C{sig1.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600",
+            "is_improvement": first_row["is_improvement"],
+            "is_regression": first_row["is_regression"],
+            "is_meaningful": first_row["is_meaningful"],
         },
         {
-            'base_rev': test_perfcomp_push.revision,
-            'new_rev': test_perfcomp_push_2.revision,
-            'framework_id': sig3.framework.id,
-            'platform': sig3.platform.platform,
-            'suite': sig3.suite,
-            'is_empty': False,
-            'header_name': second_row['header_name'],
-            'base_repository_name': sig3.repository.name,
-            'new_repository_name': sig4.repository.name,
-            'base_app': '',
-            'new_app': '',
-            'is_complete': second_row['is_complete'],
-            'base_measurement_unit': sig3.measurement_unit,
-            'new_measurement_unit': sig4.measurement_unit,
-            'base_retriggerable_job_ids': [1, 2],
-            'new_retriggerable_job_ids': [4, 7],
-            'base_runs': sig3_val,
-            'new_runs': sig4_val,
-            'base_avg_value': round(second_row['base_avg_value'], 2),
-            'new_avg_value': round(second_row['new_avg_value'], 2),
-            'base_median_value': round(second_row['base_median_value'], 2),
-            'new_median_value': round(second_row['new_median_value'], 2),
-            'test': sig3.test,
-            'option_name': second_row['option_name'],
-            'extra_options': sig3.extra_options,
-            'base_stddev': round(second_row['base_stddev'], 2),
-            'new_stddev': round(second_row['new_stddev'], 2),
-            'base_stddev_pct': round(second_row['base_stddev_pct'], 2),
-            'new_stddev_pct': round(second_row['new_stddev_pct'], 2),
-            'confidence': round(second_row['confidence'], 2),
-            'confidence_text': second_row['confidence_text'],
-            'delta_value': round(second_row['delta_value'], 2),
-            'delta_percentage': round(second_row['delta_pct'], 2),
-            'magnitude': round(second_row['magnitude'], 2),
-            'new_is_better': second_row['new_is_better'],
-            'lower_is_better': second_row['lower_is_better'],
-            'is_confident': second_row['is_confident'],
-            'more_runs_are_needed': second_row['more_runs_are_needed'],
-            'noise_metric': False,
-            'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
-            f'highlightedRevisions={test_perfcomp_push_2.revision}&'
-            f'series={test_repository.name}%2C{sig3.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600',
-            'is_improvement': second_row['is_improvement'],
-            'is_regression': second_row['is_regression'],
-            'is_meaningful': second_row['is_meaningful'],
+            "base_rev": test_perfcomp_push.revision,
+            "new_rev": test_perfcomp_push_2.revision,
+            "framework_id": sig3.framework.id,
+            "platform": sig3.platform.platform,
+            "suite": sig3.suite,
+            "is_empty": False,
+            "header_name": second_row["header_name"],
+            "base_repository_name": sig3.repository.name,
+            "new_repository_name": sig4.repository.name,
+            "base_app": "",
+            "new_app": "",
+            "is_complete": second_row["is_complete"],
+            "base_measurement_unit": sig3.measurement_unit,
+            "new_measurement_unit": sig4.measurement_unit,
+            "base_retriggerable_job_ids": [1, 2],
+            "new_retriggerable_job_ids": [4, 7],
+            "base_runs": sig3_val,
+            "new_runs": sig4_val,
+            "base_avg_value": round(second_row["base_avg_value"], 2),
+            "new_avg_value": round(second_row["new_avg_value"], 2),
+            "base_median_value": round(second_row["base_median_value"], 2),
+            "new_median_value": round(second_row["new_median_value"], 2),
+            "test": sig3.test,
+            "option_name": second_row["option_name"],
+            "extra_options": sig3.extra_options,
+            "base_stddev": round(second_row["base_stddev"], 2),
+            "new_stddev": round(second_row["new_stddev"], 2),
+            "base_stddev_pct": round(second_row["base_stddev_pct"], 2),
+            "new_stddev_pct": round(second_row["new_stddev_pct"], 2),
+            "confidence": round(second_row["confidence"], 2),
+            "confidence_text": second_row["confidence_text"],
+            "delta_value": round(second_row["delta_value"], 2),
+            "delta_percentage": round(second_row["delta_pct"], 2),
+            "magnitude": round(second_row["magnitude"], 2),
+            "new_is_better": second_row["new_is_better"],
+            "lower_is_better": second_row["lower_is_better"],
+            "is_confident": second_row["is_confident"],
+            "more_runs_are_needed": second_row["more_runs_are_needed"],
+            "noise_metric": False,
+            "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+            f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+            f"series={test_repository.name}%2C{sig3.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600",
+            "is_improvement": second_row["is_improvement"],
+            "is_regression": second_row["is_regression"],
+            "is_meaningful": second_row["is_meaningful"],
         },
     ]

     query_params = (
-        '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
-        '}&no_subtests=true'.format(
+        "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+        "}&no_subtests=true".format(
             test_perf_signature.repository.name,
             test_perf_signature.repository.name,
             test_perfcomp_push.revision,
@@ -524,7 +524,7 @@ def test_perfcompare_results_multiple_runs(
         )
     )

-    response = client.get(reverse('perfcompare-results') + query_params)
+    response = client.get(reverse("perfcompare-results") + query_params)
     assert response.status_code == 200
     for result in expected:
         assert result in response.json()

@@ -533,8 +533,8 @@ def test_perfcompare_results_multiple_runs(
 def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
     non_existent_revision = "nonexistentrevision"
     query_params = (
-        '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
-        '}&no_subtests=true'.format(
+        "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+        "}&no_subtests=true".format(
             test_perf_signature.repository.name,
             test_perf_signature.repository.name,
             non_existent_revision,

@@ -543,15 +543,15 @@ def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
         )
     )

-    response = client.get(reverse('perfcompare-results') + query_params)
+    response = client.get(reverse("perfcompare-results") + query_params)
     assert response.status_code == 400
     assert response.json() == "No base push with revision {} from repo {}.".format(
         non_existent_revision, test_perf_signature.repository.name
     )

     query_params = (
-        '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
-        '}&no_subtests=true'.format(
+        "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+        "}&no_subtests=true".format(
             test_perf_signature.repository.name,
             test_perf_signature.repository.name,
             test_perfcomp_push.revision,
@@ -560,7 +560,7 @@ def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
         )
     )

-    response = client.get(reverse('perfcompare-results') + query_params)
+    response = client.get(reverse("perfcompare-results") + query_params)
     assert response.status_code == 400
     assert response.json() == "No new push with revision {} from repo {}.".format(
         non_existent_revision, test_perf_signature.repository.name

@@ -572,8 +572,8 @@ def test_interval_is_required_when_comparing_without_base(
 ):
     non_existent_revision = "nonexistentrevision"
     query_params = (
-        '?base_repository={}&new_repository={}&new_revision={}&framework={'
-        '}&no_subtests=true'.format(
+        "?base_repository={}&new_repository={}&new_revision={}&framework={"
+        "}&no_subtests=true".format(
             test_perf_signature.repository.name,
             test_perf_signature.repository.name,
             non_existent_revision,
@@ -581,68 +581,68 @@ def test_interval_is_required_when_comparing_without_base(
         )
     )

-    response = client.get(reverse('perfcompare-results') + query_params)
+    response = client.get(reverse("perfcompare-results") + query_params)
     assert response.status_code == 400
-    assert response.json() == {'non_field_errors': ['Field required: interval.']}
+    assert response.json() == {"non_field_errors": ["Field required: interval."]}


 def get_expected(
     base_sig, extra_options, test_option_collection, new_perf_data_values, base_perf_data_values
 ):
-    response = {'option_name': test_option_collection.get(base_sig.option_collection_id, '')}
+    response = {"option_name": test_option_collection.get(base_sig.option_collection_id, "")}
     test_suite = perfcompare_utils.get_test_suite(base_sig.suite, base_sig.test)
-    response['header_name'] = perfcompare_utils.get_header_name(
-        extra_options, response['option_name'], test_suite
+    response["header_name"] = perfcompare_utils.get_header_name(
+        extra_options, response["option_name"], test_suite
     )
-    response['base_avg_value'] = perfcompare_utils.get_avg(
-        base_perf_data_values, response['header_name']
+    response["base_avg_value"] = perfcompare_utils.get_avg(
+        base_perf_data_values, response["header_name"]
     )
-    response['new_avg_value'] = perfcompare_utils.get_avg(
-        new_perf_data_values, response['header_name']
+    response["new_avg_value"] = perfcompare_utils.get_avg(
+        new_perf_data_values, response["header_name"]
     )
-    response['base_median_value'] = perfcompare_utils.get_median(base_perf_data_values)
-    response['new_median_value'] = perfcompare_utils.get_median(new_perf_data_values)
-    response['delta_value'] = perfcompare_utils.get_delta_value(
-        response['new_avg_value'], response.get('base_avg_value')
+    response["base_median_value"] = perfcompare_utils.get_median(base_perf_data_values)
+    response["new_median_value"] = perfcompare_utils.get_median(new_perf_data_values)
+    response["delta_value"] = perfcompare_utils.get_delta_value(
+        response["new_avg_value"], response.get("base_avg_value")
     )
-    response['delta_pct'] = perfcompare_utils.get_delta_percentage(
-        response['delta_value'], response['base_avg_value']
+    response["delta_pct"] = perfcompare_utils.get_delta_percentage(
+        response["delta_value"], response["base_avg_value"]
     )
-    response['base_stddev'] = perfcompare_utils.get_stddev(
-        base_perf_data_values, response['header_name']
+    response["base_stddev"] = perfcompare_utils.get_stddev(
+        base_perf_data_values, response["header_name"]
     )
-    response['new_stddev'] = perfcompare_utils.get_stddev(
-        new_perf_data_values, response['header_name']
+    response["new_stddev"] = perfcompare_utils.get_stddev(
+        new_perf_data_values, response["header_name"]
     )
-    response['base_stddev_pct'] = perfcompare_utils.get_stddev_pct(
-        response['base_avg_value'], response['base_stddev']
+    response["base_stddev_pct"] = perfcompare_utils.get_stddev_pct(
+        response["base_avg_value"], response["base_stddev"]
     )
-    response['new_stddev_pct'] = perfcompare_utils.get_stddev_pct(
-        response['new_avg_value'], response['new_stddev']
+    response["new_stddev_pct"] = perfcompare_utils.get_stddev_pct(
+        response["new_avg_value"], response["new_stddev"]
     )
-    response['magnitude'] = perfcompare_utils.get_magnitude(response['delta_pct'])
-    response['new_is_better'] = perfcompare_utils.is_new_better(
-        response['delta_value'], base_sig.lower_is_better
+    response["magnitude"] = perfcompare_utils.get_magnitude(response["delta_pct"])
+    response["new_is_better"] = perfcompare_utils.is_new_better(
+        response["delta_value"], base_sig.lower_is_better
     )
-    response['lower_is_better'] = base_sig.lower_is_better
-    response['confidence'] = perfcompare_utils.get_abs_ttest_value(
+    response["lower_is_better"] = base_sig.lower_is_better
+    response["confidence"] = perfcompare_utils.get_abs_ttest_value(
         base_perf_data_values, new_perf_data_values
     )
-    response['is_confident'] = perfcompare_utils.is_confident(
-        len(base_perf_data_values), len(new_perf_data_values), response['confidence']
+    response["is_confident"] = perfcompare_utils.is_confident(
+        len(base_perf_data_values), len(new_perf_data_values), response["confidence"]
     )
-    response['confidence_text'] = perfcompare_utils.get_confidence_text(response['confidence'])
-    response['is_complete'] = True
-    response['more_runs_are_needed'] = perfcompare_utils.more_runs_are_needed(
-        response['is_complete'], response['is_confident'], len(base_perf_data_values)
+    response["confidence_text"] = perfcompare_utils.get_confidence_text(response["confidence"])
+    response["is_complete"] = True
+    response["more_runs_are_needed"] = perfcompare_utils.more_runs_are_needed(
+        response["is_complete"], response["is_confident"], len(base_perf_data_values)
     )
     class_name = perfcompare_utils.get_class_name(
-        response['new_is_better'],
-        response['base_avg_value'],
-        response['new_avg_value'],
-        response['confidence'],
+        response["new_is_better"],
+        response["base_avg_value"],
+        response["new_avg_value"],
+        response["confidence"],
     )
-    response['is_improvement'] = class_name == 'success'
-    response['is_regression'] = class_name == 'danger'
-    response['is_meaningful'] = class_name == ''
+    response["is_improvement"] = class_name == "success"
+    response["is_regression"] = class_name == "danger"
+    response["is_meaningful"] = class_name == ""
     return response
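Note: every hunk in this diff is the same mechanical rewrite, with single-quoted string literals becoming double-quoted ones. A minimal sketch (illustrative only, not part of this commit) of reproducing one such rewrite with Black's Python API, assuming the black package is installed and that line length 100 and Python 3.9 match the project's settings:

import black

# Black's string normalization turns 'a11yr' into "a11yr"; format_str applies
# the same rules the command-line tool and the pre-commit hook would apply.
mode = black.Mode(line_length=100, target_versions={black.TargetVersion.PY39})
print(black.format_str("suite = 'a11yr'", mode=mode))  # prints: suite = "a11yr"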
@@ -17,44 +17,44 @@ def test_alerts_get(
     test_taskcluster_metadata,
     test_taskcluster_metadata_2,
 ):
-    resp = client.get(reverse('performance-alerts-list'))
+    resp = client.get(reverse("performance-alerts-list"))
     assert resp.status_code == 200

     # should just have the one alert
-    assert resp.json()['next'] is None
-    assert resp.json()['previous'] is None
-    assert len(resp.json()['results']) == 1
-    assert set(resp.json()['results'][0].keys()) == {
-        'amount_pct',
-        'amount_abs',
-        'id',
-        'is_regression',
-        'starred',
-        'manually_created',
-        'new_value',
-        'prev_value',
-        'related_summary_id',
-        'series_signature',
-        'taskcluster_metadata',
-        'prev_taskcluster_metadata',
-        'profile_url',
-        'prev_profile_url',
-        'summary_id',
-        'status',
-        't_value',
-        'classifier',
-        'classifier_email',
-        'backfill_record',
-        'noise_profile',
+    assert resp.json()["next"] is None
+    assert resp.json()["previous"] is None
+    assert len(resp.json()["results"]) == 1
+    assert set(resp.json()["results"][0].keys()) == {
+        "amount_pct",
+        "amount_abs",
+        "id",
+        "is_regression",
+        "starred",
+        "manually_created",
+        "new_value",
+        "prev_value",
+        "related_summary_id",
+        "series_signature",
+        "taskcluster_metadata",
+        "prev_taskcluster_metadata",
+        "profile_url",
+        "prev_profile_url",
+        "summary_id",
+        "status",
+        "t_value",
+        "classifier",
+        "classifier_email",
+        "backfill_record",
+        "noise_profile",
     }
-    assert resp.json()['results'][0]['related_summary_id'] is None
-    assert set(resp.json()['results'][0]['taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert resp.json()["results"][0]["related_summary_id"] is None
+    assert set(resp.json()["results"][0]["taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }
-    assert set(resp.json()['results'][0]['prev_taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert set(resp.json()["results"][0]["prev_taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }

@@ -71,14 +71,14 @@ def test_alerts_put(
     test_user,
     test_sheriff,
 ):
-    resp = client.get(reverse('performance-alerts-list'))
+    resp = client.get(reverse("performance-alerts-list"))
     assert resp.status_code == 200
-    assert resp.json()['results'][0]['related_summary_id'] is None
+    assert resp.json()["results"][0]["related_summary_id"] is None

     # verify that we fail if not authenticated
     resp = client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
     )
     assert resp.status_code == 403
     assert PerformanceAlert.objects.get(id=1).related_summary_id is None

@@ -86,8 +86,8 @@ def test_alerts_put(
     # verify that we fail if authenticated, but not staff
     client.force_authenticate(user=test_user)
     resp = client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
     )
     assert resp.status_code == 403
     assert PerformanceAlert.objects.get(id=1).related_summary_id is None

@@ -95,8 +95,8 @@ def test_alerts_put(
     # verify that we succeed if authenticated + staff
     client.force_authenticate(user=test_sheriff)
     resp = client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
     )
     assert resp.status_code == 200
     assert PerformanceAlert.objects.get(id=1).related_summary_id == 2

@@ -104,8 +104,8 @@ def test_alerts_put(

     # verify that we can unset it too
     resp = client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': None, 'status': PerformanceAlert.UNTRIAGED},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": None, "status": PerformanceAlert.UNTRIAGED},
     )
     assert resp.status_code == 200
     assert PerformanceAlert.objects.get(id=1).related_summary_id is None
@@ -136,8 +136,8 @@ def test_reassign_different_repository(
     # mark downstream of summary with different repository,
     # should succeed
     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': test_perf_alert_summary_2.id, 'status': PerformanceAlert.DOWNSTREAM},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": test_perf_alert_summary_2.id, "status": PerformanceAlert.DOWNSTREAM},
     )
     assert resp.status_code == 200
     test_perf_alert.refresh_from_db()

@@ -155,7 +155,7 @@ def test_reassign_different_framework(
 ):
     # verify that we can't reassign to another performance alert summary
     # with a different framework
-    framework_2 = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
+    framework_2 = PerformanceFramework.objects.create(name="test_talos_2", enabled=True)
     test_perf_alert_summary_2.framework = framework_2
     test_perf_alert_summary_2.save()


@@ -168,8 +168,8 @@ def assert_incompatible_alert_assignment_fails(
     authorized_sheriff_client, perf_alert, incompatible_summary
 ):
     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'related_summary_id': incompatible_summary.id, 'status': PerformanceAlert.REASSIGNED},
+        reverse("performance-alerts-list") + "1/",
+        {"related_summary_id": incompatible_summary.id, "status": PerformanceAlert.REASSIGNED},
     )
     assert resp.status_code == 400
     perf_alert.refresh_from_db()
@@ -181,25 +181,25 @@ def assert_incompatible_alert_assignment_fails(
 def alert_create_post_blob(test_perf_alert_summary, test_perf_signature):
     # this blob should be sufficient to create a new alert (assuming
     # the user of this API is authorized to do so!)
-    return {'summary_id': test_perf_alert_summary.id, 'signature_id': test_perf_signature.id}
+    return {"summary_id": test_perf_alert_summary.id, "signature_id": test_perf_signature.id}


 def test_alerts_post(
     client, alert_create_post_blob, test_user, test_sheriff, generate_enough_perf_datum
 ):
     # verify that we fail if not authenticated
-    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+    resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
     assert resp.status_code == 403

     # verify that we fail if authenticated, but not staff
     client.force_authenticate(user=test_user)
-    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+    resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
     assert resp.status_code == 403
     assert PerformanceAlert.objects.count() == 0

     # verify that we succeed if staff + authenticated
     client.force_authenticate(user=test_sheriff)
-    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+    resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
     assert resp.status_code == 200
     assert PerformanceAlert.objects.count() == 1

@@ -222,11 +222,11 @@ def test_alerts_post_insufficient_data(
     alert_create_post_blob,
 ):
     # we should not succeed if insufficient data is passed through
-    for removed_key in ['summary_id', 'signature_id']:
+    for removed_key in ["summary_id", "signature_id"]:
         new_post_blob = copy.copy(alert_create_post_blob)
         del new_post_blob[removed_key]

-        resp = authorized_sheriff_client.post(reverse('performance-alerts-list'), new_post_blob)
+        resp = authorized_sheriff_client.post(reverse("performance-alerts-list"), new_post_blob)
         assert resp.status_code == 400
         assert PerformanceAlert.objects.count() == 0

@@ -239,7 +239,7 @@ def test_nudge_alert_towards_conflicting_one(
     old_conflicting_update = test_conflicting_perf_alert.last_updated

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'prev_push_id': 2, 'push_id': 3}
+        reverse("performance-alerts-list") + "1/", {"prev_push_id": 2, "push_id": 3}
     )
     assert resp.status_code == 200
     test_conflicting_perf_alert.refresh_from_db()

@@ -257,7 +257,7 @@ def test_nudge_alert_towards_conflicting_one(
 @pytest.mark.xfail
 @pytest.mark.parametrize(
     "perf_datum_id, towards_push_ids",
-    [(3, {'prev_push_id': 1, 'push_id': 2}), (2, {'prev_push_id': 2, 'push_id': 3})],
+    [(3, {"prev_push_id": 1, "push_id": 2}), (2, {"prev_push_id": 2, "push_id": 3})],
 )
 def test_nudge_alert_to_changeset_without_alert_summary(
     authorized_sheriff_client, test_perf_alert, test_perf_data, perf_datum_id, towards_push_ids

@@ -267,7 +267,7 @@ def test_nudge_alert_to_changeset_without_alert_summary(
     old_alert_summary_id = test_perf_alert.summary.id

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', towards_push_ids
+        reverse("performance-alerts-list") + "1/", towards_push_ids
     )

     assert resp.status_code == 200
@@ -276,8 +276,8 @@ def test_nudge_alert_to_changeset_without_alert_summary(
     new_alert_summary = test_perf_alert.summary

     assert new_alert_summary.id != old_alert_summary_id
-    assert 'alert_summary_id' in resp.json()
-    assert resp.json()['alert_summary_id'] == new_alert_summary.id
+    assert "alert_summary_id" in resp.json()
+    assert resp.json()["alert_summary_id"] == new_alert_summary.id

     # new summary has correct push ids
     assert new_alert_summary.prev_push_id == towards_push_ids["prev_push_id"]

@@ -291,7 +291,7 @@ def test_nudge_alert_to_changeset_without_alert_summary(
 @pytest.mark.xfail
 @pytest.mark.parametrize(
     "perf_datum_ids, alert_id_to_move, towards_push_ids",
-    [((2, 3), 2, {'push_id': 2, 'prev_push_id': 1}), (None, 1, {'push_id': 3, 'prev_push_id': 2})],
+    [((2, 3), 2, {"push_id": 2, "prev_push_id": 1}), (None, 1, {"push_id": 3, "prev_push_id": 2})],
 )
 def test_nudge_alert_to_changeset_with_an_alert_summary(
     authorized_sheriff_client,

@@ -325,7 +325,7 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(
     assert target_summary.first_triaged is None

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + str(alert_id_to_move) + '/', towards_push_ids
+        reverse("performance-alerts-list") + str(alert_id_to_move) + "/", towards_push_ids
     )

     assert resp.status_code == 200

@@ -335,8 +335,8 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(
     target_summary.refresh_from_db()

     assert alert_to_move.summary.id != old_alert_summary_id
-    assert 'alert_summary_id' in resp.json()
-    assert resp.json()['alert_summary_id'] == alert_to_move.summary.id
+    assert "alert_summary_id" in resp.json()
+    assert resp.json()["alert_summary_id"] == alert_to_move.summary.id

     # old alert summary gets deleted
     assert not PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).exists()
@@ -377,7 +377,7 @@ def test_nudge_left_alert_from_alert_summary_with_more_alerts(
     test_perf_alert.save()

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '2/', {'push_id': 2, 'prev_push_id': 1}
+        reverse("performance-alerts-list") + "2/", {"push_id": 2, "prev_push_id": 1}
     )

     assert resp.status_code == 200

@@ -387,8 +387,8 @@ def test_nudge_left_alert_from_alert_summary_with_more_alerts(
     test_perf_alert_summary_2.refresh_from_db()

     assert test_perf_alert_2.summary.id != old_alert_summary_id
-    assert 'alert_summary_id' in resp.json()
-    assert resp.json()['alert_summary_id'] == test_perf_alert_2.summary.id
+    assert "alert_summary_id" in resp.json()
+    assert resp.json()["alert_summary_id"] == test_perf_alert_2.summary.id

     # old alert summary still there
     old_alert_summary = PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).first()

@@ -425,7 +425,7 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(
     test_perf_alert_2.save()

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+        reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
     )

     assert resp.status_code == 200

@@ -436,8 +436,8 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(
     test_perf_alert_summary_2.refresh_from_db()

     assert test_perf_alert.summary.id != old_alert_summary_id
-    assert 'alert_summary_id' in resp.json()
-    assert resp.json()['alert_summary_id'] == test_perf_alert.summary.id
+    assert "alert_summary_id" in resp.json()
+    assert resp.json()["alert_summary_id"] == test_perf_alert.summary.id

     # old alert summary still there
     assert PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).count() == 1
@@ -458,7 +458,7 @@ def test_nudge_raises_exception_when_no_perf_data(
     initial_alert_count = PerformanceAlert.objects.all().count()

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+        reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
     )

     assert resp.status_code == 400

@@ -471,7 +471,7 @@ def test_nudge_recalculates_alert_properties(
     authorized_sheriff_client, test_perf_alert, test_perf_alert_summary, test_perf_data
 ):
     def _get_alert_properties(perf_alert):
-        prop_names = ['amount_pct', 'amount_abs', 'prev_value', 'new_value', 't_value']
+        prop_names = ["amount_pct", "amount_abs", "prev_value", "new_value", "t_value"]
         return [getattr(perf_alert, prop_name) for prop_name in prop_names]

     # let's update the performance data

@@ -481,7 +481,7 @@ def test_nudge_recalculates_alert_properties(
     perf_datum.save()

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+        reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
     )
     assert resp.status_code == 200
     test_perf_alert.refresh_from_db()
@@ -531,11 +531,11 @@ def test_timestamps_on_manual_created_alert_via_their_endpoints(
     # created <= last_updated, created <= first_triaged
     # BUT manually_created is True
    resp = authorized_sheriff_client.post(
-        reverse('performance-alerts-list'), alert_create_post_blob
+        reverse("performance-alerts-list"), alert_create_post_blob
     )
     assert resp.status_code == 200

-    manual_alert_id = resp.json()['alert_id']
+    manual_alert_id = resp.json()["alert_id"]
     manual_alert = PerformanceAlert.objects.get(pk=manual_alert_id)
     assert manual_alert.manually_created is True
     assert manual_alert.summary.first_triaged is not None

@@ -560,7 +560,7 @@ def test_alert_timestamps_via_endpoint(
     old_last_updated = test_perf_alert.last_updated

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'starred': True}
+        reverse("performance-alerts-list") + "1/", {"starred": True}
     )
     assert resp.status_code == 200
     test_perf_alert.refresh_from_db()

@@ -577,7 +577,7 @@ def test_alert_timestamps_via_endpoint(
     # keeps first_triaged the same
     authorized_sheriff_client.force_authenticate(user=test_sheriff)
     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/', {'status': PerformanceAlert.ACKNOWLEDGED}
+        reverse("performance-alerts-list") + "1/", {"status": PerformanceAlert.ACKNOWLEDGED}
     )
     assert resp.status_code == 200
     test_perf_alert.refresh_from_db()

@@ -586,7 +586,7 @@ def test_alert_timestamps_via_endpoint(
     assert test_perf_alert.last_updated > old_last_updated


-@pytest.mark.parametrize('relation', [PerformanceAlert.DOWNSTREAM, PerformanceAlert.REASSIGNED])
+@pytest.mark.parametrize("relation", [PerformanceAlert.DOWNSTREAM, PerformanceAlert.REASSIGNED])
 def test_related_alerts_timestamps_via_endpoint(
     authorized_sheriff_client,
     test_sheriff,
@@ -609,8 +609,8 @@ def test_related_alerts_timestamps_via_endpoint(
     old_summary_last_updated_2 = test_perf_alert_summary_2.last_updated

     resp = authorized_sheriff_client.put(
-        reverse('performance-alerts-list') + '1/',
-        {'status': relation, 'related_summary_id': test_perf_alert_summary_2.id},
+        reverse("performance-alerts-list") + "1/",
+        {"status": relation, "related_summary_id": test_perf_alert_summary_2.id},
     )
     assert resp.status_code == 200
     test_perf_alert.refresh_from_db()

@@ -673,4 +673,4 @@ def dump_vars(alert_summaries, perf_data, alerts=None):
         for alert in alerts:
             dump(alert)
     for perf_datum in perf_data:
-        pprint('PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})'.format(perf_datum))
+        pprint("PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})".format(perf_datum))
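The alert-API tests in this diff repeat one authentication ladder: an anonymous client gets 403, an authenticated non-staff user gets 403, and only the staff sheriff succeeds. A minimal sketch of that pattern (assumptions: Django REST framework's APIClient; the put_as helper is hypothetical, not part of the test suite):

from rest_framework.test import APIClient

def put_as(user, url, payload):
    # force_authenticate bypasses real credentials, as the test fixtures do;
    # user=None leaves the client anonymous.
    client = APIClient()
    client.force_authenticate(user=user)
    return client.put(url, payload)

# expected: put_as(None, ...) -> 403, put_as(test_user, ...) -> 403,
# put_as(test_sheriff, ...) -> 200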
@@ -31,8 +31,8 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
     for i in range(2):
         Push.objects.create(
             repository=test_repository_onhold,
-            revision='1234abcd{}'.format(i),
-            author='foo@bar.com',
+            revision="1234abcd{}".format(i),
+            author="foo@bar.com",
             time=datetime.now(),
         )

@@ -63,69 +63,69 @@ def test_alert_summaries_get(
     test_taskcluster_metadata_2,
 ):
     # verify that we get the performance summary + alert on GET
-    resp = client.get(reverse('performance-alert-summaries-list'))
+    resp = client.get(reverse("performance-alert-summaries-list"))
     assert resp.status_code == 200

     # should just have the one alert summary (with one alert)
-    assert resp.json()['next'] is None
-    assert resp.json()['previous'] is None
-    assert len(resp.json()['results']) == 1
-    assert set(resp.json()['results'][0].keys()) == {
-        'alerts',
-        'bug_number',
-        'bug_updated',
-        'bug_due_date',
-        'issue_tracker',
-        'notes',
-        'assignee_username',
-        'assignee_email',
-        'framework',
-        'id',
-        'created',
-        'first_triaged',
-        'triage_due_date',
-        'prev_push_id',
-        'related_alerts',
-        'repository',
-        'push_id',
-        'status',
-        'revision',
-        'push_timestamp',
-        'prev_push_revision',
-        'performance_tags',
+    assert resp.json()["next"] is None
+    assert resp.json()["previous"] is None
+    assert len(resp.json()["results"]) == 1
+    assert set(resp.json()["results"][0].keys()) == {
+        "alerts",
+        "bug_number",
+        "bug_updated",
+        "bug_due_date",
+        "issue_tracker",
+        "notes",
+        "assignee_username",
+        "assignee_email",
+        "framework",
+        "id",
+        "created",
+        "first_triaged",
+        "triage_due_date",
+        "prev_push_id",
+        "related_alerts",
+        "repository",
+        "push_id",
+        "status",
+        "revision",
+        "push_timestamp",
+        "prev_push_revision",
+        "performance_tags",
     }
-    assert len(resp.json()['results'][0]['alerts']) == 1
-    assert set(resp.json()['results'][0]['alerts'][0].keys()) == {
-        'id',
-        'status',
-        'series_signature',
-        'taskcluster_metadata',
-        'prev_taskcluster_metadata',
-        'profile_url',
-        'prev_profile_url',
-        'is_regression',
-        'starred',
-        'manually_created',
-        'prev_value',
-        'new_value',
-        't_value',
-        'amount_abs',
-        'amount_pct',
-        'summary_id',
-        'related_summary_id',
-        'classifier',
-        'classifier_email',
-        'backfill_record',
-        'noise_profile',
+    assert len(resp.json()["results"][0]["alerts"]) == 1
+    assert set(resp.json()["results"][0]["alerts"][0].keys()) == {
+        "id",
+        "status",
+        "series_signature",
+        "taskcluster_metadata",
+        "prev_taskcluster_metadata",
+        "profile_url",
+        "prev_profile_url",
+        "is_regression",
+        "starred",
+        "manually_created",
+        "prev_value",
+        "new_value",
+        "t_value",
+        "amount_abs",
+        "amount_pct",
+        "summary_id",
+        "related_summary_id",
+        "classifier",
+        "classifier_email",
+        "backfill_record",
+        "noise_profile",
     }
-    assert resp.json()['results'][0]['related_alerts'] == []
-    assert set(resp.json()['results'][0]['alerts'][0]['taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert resp.json()["results"][0]["related_alerts"] == []
+    assert set(resp.json()["results"][0]["alerts"][0]["taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }
-    assert set(resp.json()['results'][0]['alerts'][0]['prev_taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert set(resp.json()["results"][0]["alerts"][0]["prev_taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }

@@ -142,69 +142,69 @@ def test_alert_summaries_get_onhold(
     test_repository_onhold,
 ):
     # verify that we get the performance summary + alert on GET
-    resp = client.get(reverse('performance-alert-summaries-list'))
+    resp = client.get(reverse("performance-alert-summaries-list"))
     assert resp.status_code == 200

     # should just have the one alert summary (with one alert)
-    assert resp.json()['next'] is None
-    assert resp.json()['previous'] is None
-    assert len(resp.json()['results']) == 1
-    assert set(resp.json()['results'][0].keys()) == {
-        'alerts',
-        'bug_number',
-        'bug_updated',
-        'bug_due_date',
-        'issue_tracker',
-        'notes',
-        'assignee_username',
-        'assignee_email',
-        'framework',
-        'id',
-        'created',
-        'first_triaged',
-        'triage_due_date',
-        'prev_push_id',
-        'related_alerts',
-        'repository',
-        'push_id',
-        'status',
-        'revision',
-        'push_timestamp',
-        'prev_push_revision',
-        'performance_tags',
+    assert resp.json()["next"] is None
+    assert resp.json()["previous"] is None
+    assert len(resp.json()["results"]) == 1
+    assert set(resp.json()["results"][0].keys()) == {
+        "alerts",
+        "bug_number",
+        "bug_updated",
+        "bug_due_date",
+        "issue_tracker",
+        "notes",
+        "assignee_username",
+        "assignee_email",
+        "framework",
+        "id",
+        "created",
+        "first_triaged",
+        "triage_due_date",
+        "prev_push_id",
+        "related_alerts",
+        "repository",
+        "push_id",
+        "status",
+        "revision",
+        "push_timestamp",
+        "prev_push_revision",
+        "performance_tags",
     }
-    assert len(resp.json()['results'][0]['alerts']) == 1
-    assert set(resp.json()['results'][0]['alerts'][0].keys()) == {
-        'id',
-        'status',
-        'series_signature',
-        'taskcluster_metadata',
-        'prev_taskcluster_metadata',
-        'profile_url',
-        'prev_profile_url',
-        'is_regression',
-        'starred',
-        'manually_created',
-        'prev_value',
-        'new_value',
-        't_value',
-        'amount_abs',
-        'amount_pct',
-        'summary_id',
-        'related_summary_id',
-        'classifier',
-        'classifier_email',
-        'backfill_record',
-        'noise_profile',
+    assert len(resp.json()["results"][0]["alerts"]) == 1
+    assert set(resp.json()["results"][0]["alerts"][0].keys()) == {
+        "id",
+        "status",
+        "series_signature",
+        "taskcluster_metadata",
+        "prev_taskcluster_metadata",
+        "profile_url",
+        "prev_profile_url",
+        "is_regression",
+        "starred",
+        "manually_created",
+        "prev_value",
+        "new_value",
+        "t_value",
+        "amount_abs",
+        "amount_pct",
+        "summary_id",
+        "related_summary_id",
+        "classifier",
+        "classifier_email",
+        "backfill_record",
+        "noise_profile",
     }
-    assert resp.json()['results'][0]['related_alerts'] == []
-    assert set(resp.json()['results'][0]['alerts'][0]['taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert resp.json()["results"][0]["related_alerts"] == []
+    assert set(resp.json()["results"][0]["alerts"][0]["taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }
-    assert set(resp.json()['results'][0]['alerts'][0]['prev_taskcluster_metadata'].keys()) == {
-        'task_id',
-        'retry_id',
+    assert set(resp.json()["results"][0]["alerts"][0]["prev_taskcluster_metadata"].keys()) == {
+        "task_id",
+        "retry_id",
     }

@@ -212,27 +212,27 @@ def test_alert_summaries_put(
     client, test_repository, test_perf_signature, test_perf_alert_summary, test_user, test_sheriff
 ):
     # verify that we fail if not authenticated
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+    resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.get(id=1).status == 0

     # verify that we fail if authenticated, but not staff
     client.force_authenticate(user=test_user)
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+    resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.get(id=1).status == 0

     # verify that we succeed if authenticated + staff
     client.force_authenticate(user=test_sheriff)
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+    resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
     assert resp.status_code == 200
     assert PerformanceAlertSummary.objects.get(id=1).status == 1

     # verify we can set assignee
     client.force_authenticate(user=test_sheriff)
     resp = client.put(
-        reverse('performance-alert-summaries-list') + '1/',
-        {'assignee_username': test_user.username},
+        reverse("performance-alert-summaries-list") + "1/",
+        {"assignee_username": test_user.username},
     )
     assert resp.status_code == 200
     assert PerformanceAlertSummary.objects.get(id=1).assignee == test_user
@@ -248,20 +248,20 @@ def test_auth_for_alert_summary_post(
     test_sheriff,
 ):
     post_blob = {
-        'repository_id': test_repository.id,
-        'framework_id': test_perf_signature.framework.id,
-        'prev_push_id': 1,
-        'push_id': 2,
+        "repository_id": test_repository.id,
+        "framework_id": test_perf_signature.framework.id,
+        "prev_push_id": 1,
+        "push_id": 2,
     }

     # verify that we fail if not authenticated
-    resp = client.post(reverse('performance-alert-summaries-list'), post_blob)
+    resp = client.post(reverse("performance-alert-summaries-list"), post_blob)
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.count() == 0

     # verify that we fail if authenticated, but not staff
     client.force_authenticate(user=test_user)
-    resp = client.post(reverse('performance-alert-summaries-list'), post_blob)
+    resp = client.post(reverse("performance-alert-summaries-list"), post_blob)
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.count() == 0

@@ -276,27 +276,27 @@ def test_alert_summary_post(
     test_sheriff,
 ):
     post_blob = {
-        'repository_id': test_repository.id,
-        'framework_id': test_perf_signature.framework.id,
-        'prev_push_id': 1,
-        'push_id': 2,
+        "repository_id": test_repository.id,
+        "framework_id": test_perf_signature.framework.id,
+        "prev_push_id": 1,
+        "push_id": 2,
     }

     # verify that we succeed if authenticated + staff
-    resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
+    resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
     assert resp.status_code == 200

     assert PerformanceAlertSummary.objects.count() == 1
     alert_summary = PerformanceAlertSummary.objects.first()
     assert alert_summary.repository == test_repository
     assert alert_summary.framework == test_perf_signature.framework
-    assert alert_summary.prev_push_id == post_blob['prev_push_id']
-    assert alert_summary.push_id == post_blob['push_id']
-    assert resp.data['alert_summary_id'] == alert_summary.id
+    assert alert_summary.prev_push_id == post_blob["prev_push_id"]
+    assert alert_summary.push_id == post_blob["push_id"]
+    assert resp.data["alert_summary_id"] == alert_summary.id

     # verify that we don't create a new performance alert summary if one
     # already exists (but also don't throw an error)
-    resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
+    resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
     assert resp.status_code == 200
     assert PerformanceAlertSummary.objects.count() == 1

@ -312,21 +312,21 @@ def test_push_range_validation_for_alert_summary_post(
|
|||
):
|
||||
identical_push = 1
|
||||
post_blob = {
|
||||
'repository_id': test_repository.id,
|
||||
'framework_id': test_perf_signature.framework.id,
|
||||
'prev_push_id': identical_push,
|
||||
'push_id': identical_push,
|
||||
"repository_id": test_repository.id,
|
||||
"framework_id": test_perf_signature.framework.id,
|
||||
"prev_push_id": identical_push,
|
||||
"push_id": identical_push,
|
||||
}
|
||||
|
||||
# verify that we succeed if authenticated + staff
|
||||
resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
|
||||
resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
|
||||
assert resp.status_code == 400
|
||||
|
||||
assert PerformanceAlertSummary.objects.count() == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'modification', [{'notes': 'human created notes'}, {'bug_number': 123456, 'issue_tracker': 1}]
|
||||
"modification", [{"notes": "human created notes"}, {"bug_number": 123456, "issue_tracker": 1}]
|
||||
)
|
||||
def test_alert_summary_timestamps_via_endpoints(
|
||||
authorized_sheriff_client, test_perf_alert_summary, modification
|
||||
|
@ -335,7 +335,7 @@ def test_alert_summary_timestamps_via_endpoints(
|
|||
|
||||
# when editing notes & linking bugs
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/', modification
|
||||
reverse("performance-alert-summaries-list") + "1/", modification
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -354,7 +354,7 @@ def test_bug_number_and_timestamp_on_setting_value(
|
|||
|
||||
# link a bug
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/', {'bug_number': 123456}
|
||||
reverse("performance-alert-summaries-list") + "1/", {"bug_number": 123456}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -374,7 +374,7 @@ def test_bug_number_and_timestamp_on_overriding(
|
|||
|
||||
# update the existing bug number
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/', {'bug_number': 987654}
|
||||
reverse("performance-alert-summaries-list") + "1/", {"bug_number": 987654}
|
||||
)
|
||||
|
||||
assert resp.status_code == 200
|
||||
|
@ -393,7 +393,7 @@ def test_bug_number_and_timestamp_dont_update_from_other_modifications(
|
|||
|
||||
# link a bug
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/', {'notes': 'human created notes'}
|
||||
reverse("performance-alert-summaries-list") + "1/", {"notes": "human created notes"}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -409,8 +409,8 @@ def test_add_multiple_tags_to_alert_summary(
|
|||
assert test_perf_alert_summary.performance_tags.count() == 1
|
||||
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/',
|
||||
{'performance_tags': [test_perf_tag.name, test_perf_tag_2.name]},
|
||||
reverse("performance-alert-summaries-list") + "1/",
|
||||
{"performance_tags": [test_perf_tag.name, test_perf_tag_2.name]},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -422,7 +422,7 @@ def test_remove_a_tag_from_a_summary(authorized_sheriff_client, test_perf_alert_
|
|||
assert test_perf_alert_summary.performance_tags.count() == 1
|
||||
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/', {'performance_tags': []}
|
||||
reverse("performance-alert-summaries-list") + "1/", {"performance_tags": []}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -436,8 +436,8 @@ def test_cannot_add_unregistered_tag_to_a_summary(
|
|||
assert test_perf_alert_summary.performance_tags.count() == 1
|
||||
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alert-summaries-list') + '1/',
|
||||
{'performance_tags': ['unregistered-tag']},
|
||||
reverse("performance-alert-summaries-list") + "1/",
|
||||
{"performance_tags": ["unregistered-tag"]},
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -460,17 +460,17 @@ def test_timerange_with_summary_outside_range(
|
|||
test_perf_alert_summary_2.push.save()
|
||||
|
||||
resp = client.get(
|
||||
reverse('performance-alert-summaries-list'),
|
||||
reverse("performance-alert-summaries-list"),
|
||||
data={
|
||||
'framework': 1,
|
||||
'timerange': timerange_to_test,
|
||||
"framework": 1,
|
||||
"timerange": timerange_to_test,
|
||||
},
|
||||
)
|
||||
|
||||
assert resp.status_code == 200
|
||||
|
||||
retrieved_summaries = resp.json()['results']
|
||||
summary_ids = [summary['id'] for summary in retrieved_summaries]
|
||||
retrieved_summaries = resp.json()["results"]
|
||||
summary_ids = [summary["id"] for summary in retrieved_summaries]
|
||||
|
||||
assert test_perf_alert_summary_2.id in summary_ids
|
||||
assert len(summary_ids) == 1
|
||||
|
@ -491,16 +491,16 @@ def test_timerange_with_all_summaries_in_range(
|
|||
test_perf_alert_summary_2.push.save()
|
||||
|
||||
resp = client.get(
|
||||
reverse('performance-alert-summaries-list'),
|
||||
reverse("performance-alert-summaries-list"),
|
||||
data={
|
||||
'framework': 1,
|
||||
'timerange': timerange_to_test,
|
||||
"framework": 1,
|
||||
"timerange": timerange_to_test,
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
|
||||
retrieved_summaries = resp.json()['results']
|
||||
summary_ids = [summary['id'] for summary in retrieved_summaries]
|
||||
retrieved_summaries = resp.json()["results"]
|
||||
summary_ids = [summary["id"] for summary in retrieved_summaries]
|
||||
|
||||
assert test_perf_alert_summary.id in summary_ids
|
||||
assert test_perf_alert_summary_2.id in summary_ids
|
||||
|
@ -511,16 +511,16 @@ def test_pagesize_is_limited_from_params(
|
|||
client, test_perf_alert_summary, test_perf_alert_summary_2
|
||||
):
|
||||
resp = client.get(
|
||||
reverse('performance-alert-summaries-list'),
|
||||
reverse("performance-alert-summaries-list"),
|
||||
data={
|
||||
'framework': 1,
|
||||
'limit': 1,
|
||||
"framework": 1,
|
||||
"limit": 1,
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
|
||||
retrieved_summaries = resp.json()['results']
|
||||
summary_ids = [summary['id'] for summary in retrieved_summaries]
|
||||
retrieved_summaries = resp.json()["results"]
|
||||
summary_ids = [summary["id"] for summary in retrieved_summaries]
|
||||
|
||||
assert test_perf_alert_summary_2.id in summary_ids
|
||||
assert len(summary_ids) == 1
|
||||
|
@ -530,18 +530,18 @@ def test_pagesize_with_limit_higher_than_total_summaries(
|
|||
client, test_perf_alert_summary, test_perf_alert_summary_2
|
||||
):
|
||||
resp = client.get(
|
||||
reverse('performance-alert-summaries-list'),
|
||||
reverse("performance-alert-summaries-list"),
|
||||
data={
|
||||
'framework': 1,
|
||||
'limit': 5,
|
||||
"framework": 1,
|
||||
"limit": 5,
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
resp_json = resp.json()
|
||||
assert resp_json['next'] is None
|
||||
assert resp_json['previous'] is None
|
||||
retrieved_summaries = resp_json['results']
|
||||
summary_ids = [summary['id'] for summary in retrieved_summaries]
|
||||
assert resp_json["next"] is None
|
||||
assert resp_json["previous"] is None
|
||||
retrieved_summaries = resp_json["results"]
|
||||
summary_ids = [summary["id"] for summary in retrieved_summaries]
|
||||
|
||||
assert test_perf_alert_summary.id in summary_ids
|
||||
assert test_perf_alert_summary_2.id in summary_ids
|
||||
|
@ -559,8 +559,8 @@ def related_alert(test_perf_alert_summary, test_perf_alert_summary_2, test_perf_
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'text_to_filter',
|
||||
['mysuite2', 'mysuite2 mytest2', 'mytest2 win7', 'mysuite2 mytest2 win7 e10s opt'],
|
||||
"text_to_filter",
|
||||
["mysuite2", "mysuite2 mytest2", "mytest2 win7", "mysuite2 mytest2 win7 e10s opt"],
|
||||
)
|
||||
def test_filter_text_accounts_for_related_alerts_also(
|
||||
text_to_filter, client, test_perf_alert_summary, test_perf_alert, related_alert
|
||||
|
@ -568,17 +568,17 @@ def test_filter_text_accounts_for_related_alerts_also(
|
|||
summary_id = test_perf_alert_summary.id
|
||||
|
||||
resp = client.get(
|
||||
reverse('performance-alert-summaries-list'),
|
||||
reverse("performance-alert-summaries-list"),
|
||||
data={
|
||||
'framework': 1,
|
||||
'page': 1,
|
||||
'filter_text': text_to_filter,
|
||||
"framework": 1,
|
||||
"page": 1,
|
||||
"filter_text": text_to_filter,
|
||||
}, # excluded 'status' field to emulate 'all statuses'
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
|
||||
retrieved_summaries = resp.json()['results']
|
||||
summary_ids = [summary['id'] for summary in retrieved_summaries]
|
||||
retrieved_summaries = resp.json()["results"]
|
||||
summary_ids = [summary["id"] for summary in retrieved_summaries]
|
||||
|
||||
assert summary_id in summary_ids
|
||||
# also ensure original & related summary are both fetched
|
||||
|
|
|
@@ -4,30 +4,30 @@ from treeherder.perf.models import PerformanceBugTemplate, PerformanceFramework


def test_perf_bug_template_api(client, test_perf_framework):
-    framework2 = PerformanceFramework.objects.create(name='test_talos2', enabled=True)
+    framework2 = PerformanceFramework.objects.create(name="test_talos2", enabled=True)

    template_dicts = []
    for framework, i in zip((test_perf_framework, framework2), range(2)):
        dict = {
-            'keywords': "keyword{}".format(i),
-            'status_whiteboard': "sw{}".format(i),
-            'default_component': "dfcom{}".format(i),
-            'default_product': "dfprod{}".format(i),
-            'cc_list': "foo{}@bar.com".format(i),
-            'text': "my great text {}".format(i),
+            "keywords": "keyword{}".format(i),
+            "status_whiteboard": "sw{}".format(i),
+            "default_component": "dfcom{}".format(i),
+            "default_product": "dfprod{}".format(i),
+            "cc_list": "foo{}@bar.com".format(i),
+            "text": "my great text {}".format(i),
        }
        PerformanceBugTemplate.objects.create(framework=framework, **dict)
-        dict['framework'] = framework.id
+        dict["framework"] = framework.id
        template_dicts.append(dict)

    # test that we can get them all
-    resp = client.get(reverse('performance-bug-template-list'))
+    resp = client.get(reverse("performance-bug-template-list"))
    assert resp.status_code == 200
    assert resp.json() == template_dicts

    # test that we can get just one (the usual case, probably)
    resp = client.get(
-        reverse('performance-bug-template-list') + '?framework={}'.format(test_perf_framework.id)
+        reverse("performance-bug-template-list") + "?framework={}".format(test_perf_framework.id)
    )
    assert resp.status_code == 200
    assert resp.json() == [template_dicts[0]]

@@ -25,13 +25,13 @@ def summary_perf_signature(test_perf_signature):
    # summary performance signature don't have test value
    signature = PerformanceSignature.objects.create(
        repository=test_perf_signature.repository,
-        signature_hash=(40 * 's'),
+        signature_hash=(40 * "s"),
        framework=test_perf_signature.framework,
        platform=test_perf_signature.platform,
        option_collection=test_perf_signature.option_collection,
-        suite='mysuite',
-        test='',
-        extra_options='e10s shell',
+        suite="mysuite",
+        test="",
+        extra_options="e10s shell",
        has_subtests=True,
        last_updated=datetime.datetime.now(),
    )
@@ -44,7 +44,7 @@ def summary_perf_signature(test_perf_signature):
def test_perf_signature_same_hash_different_framework(test_perf_signature):
    # a new signature, same as the test_perf_signature in every
    # way, except it belongs to a different "framework"
-    new_framework = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
+    new_framework = PerformanceFramework.objects.create(name="test_talos_2", enabled=True)
    new_signature = PerformanceSignature.objects.create(
        repository=test_perf_signature.repository,
        signature_hash=test_perf_signature.signature_hash,
@@ -61,23 +61,23 @@ def test_perf_signature_same_hash_different_framework(test_perf_signature):

def test_no_summary_performance_data(client, test_perf_signature, test_repository):
    resp = client.get(
-        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+        reverse("performance-signatures-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200
    assert resp.json() == {
        str(test_perf_signature.id): {
-            'id': test_perf_signature.id,
-            'signature_hash': test_perf_signature.signature_hash,
-            'test': test_perf_signature.test,
-            'application': test_perf_signature.application,
-            'suite': test_perf_signature.suite,
-            'tags': test_perf_signature.tags.split(' '),
-            'option_collection_hash': test_perf_signature.option_collection.option_collection_hash,
-            'framework_id': test_perf_signature.framework.id,
-            'machine_platform': test_perf_signature.platform.platform,
-            'extra_options': test_perf_signature.extra_options.split(' '),
-            'measurement_unit': test_perf_signature.measurement_unit,
-            'should_alert': test_perf_signature.should_alert,
+            "id": test_perf_signature.id,
+            "signature_hash": test_perf_signature.signature_hash,
+            "test": test_perf_signature.test,
+            "application": test_perf_signature.application,
+            "suite": test_perf_signature.suite,
+            "tags": test_perf_signature.tags.split(" "),
+            "option_collection_hash": test_perf_signature.option_collection.option_collection_hash,
+            "framework_id": test_perf_signature.framework.id,
+            "machine_platform": test_perf_signature.platform.platform,
+            "extra_options": test_perf_signature.extra_options.split(" "),
+            "measurement_unit": test_perf_signature.measurement_unit,
+            "should_alert": test_perf_signature.should_alert,
        }
    }


@@ -85,12 +85,12 @@ def test_no_summary_performance_data(client, test_perf_signature, test_repositor
def test_performance_platforms(client, test_perf_signature):
    resp = client.get(
        reverse(
-            'performance-signatures-platforms-list',
+            "performance-signatures-platforms-list",
            kwargs={"project": test_perf_signature.repository.name},
        )
    )
    assert resp.status_code == 200
-    assert resp.json() == ['win7']
+    assert resp.json() == ["win7"]


def test_performance_platforms_expired_test(client, test_perf_signature):
@@ -99,10 +99,10 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
    test_perf_signature.save()
    resp = client.get(
        reverse(
-            'performance-signatures-platforms-list',
+            "performance-signatures-platforms-list",
            kwargs={"project": test_perf_signature.repository.name},
        )
-        + '?interval={}'.format(86400)
+        + "?interval={}".format(86400)
    )
    assert resp.status_code == 200
    assert resp.json() == []
@@ -110,8 +110,8 @@ def test_performance_platforms_expired_test(client, test_perf_signature):

def test_performance_platforms_framework_filtering(client, test_perf_signature):
    # check framework filtering
-    framework2 = PerformanceFramework.objects.create(name='test_talos2', enabled=True)
-    platform2 = MachinePlatform.objects.create(os_name='win', platform='win7-a', architecture='x86')
+    framework2 = PerformanceFramework.objects.create(name="test_talos2", enabled=True)
+    platform2 = MachinePlatform.objects.create(os_name="win", platform="win7-a", architecture="x86")
    PerformanceSignature.objects.create(
        repository=test_perf_signature.repository,
        signature_hash=test_perf_signature.signature_hash,
@@ -127,23 +127,23 @@ def test_performance_platforms_framework_filtering(client, test_perf_signature):
    # by default should return both
    resp = client.get(
        reverse(
-            'performance-signatures-platforms-list',
+            "performance-signatures-platforms-list",
            kwargs={"project": test_perf_signature.repository.name},
        )
    )
    assert resp.status_code == 200
-    assert sorted(resp.json()) == ['win7', 'win7-a']
+    assert sorted(resp.json()) == ["win7", "win7-a"]

    # if we specify just one framework, should only return one
    resp = client.get(
        reverse(
-            'performance-signatures-platforms-list',
+            "performance-signatures-platforms-list",
            kwargs={"project": test_perf_signature.repository.name},
        )
-        + '?framework={}'.format(framework2.id)
+        + "?framework={}".format(framework2.id)
    )
    assert resp.status_code == 200
-    assert resp.json() == ['win7-a']
+    assert resp.json() == ["win7-a"]


def test_summary_performance_data(
@@ -151,12 +151,12 @@ def test_summary_performance_data(
):
    summary_signature_id = summary_perf_signature.id
    resp = client.get(
-        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+        reverse("performance-signatures-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200

    resp = client.get(
-        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+        reverse("performance-signatures-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200

@@ -165,30 +165,30 @@ def test_summary_performance_data(

    for signature in [summary_perf_signature, test_perf_signature]:
        expected = {
-            'id': signature.id,
-            'signature_hash': signature.signature_hash,
-            'suite': signature.suite,
-            'option_collection_hash': signature.option_collection.option_collection_hash,
-            'framework_id': signature.framework_id,
-            'machine_platform': signature.platform.platform,
-            'should_alert': signature.should_alert,
+            "id": signature.id,
+            "signature_hash": signature.signature_hash,
+            "suite": signature.suite,
+            "option_collection_hash": signature.option_collection.option_collection_hash,
+            "framework_id": signature.framework_id,
+            "machine_platform": signature.platform.platform,
+            "should_alert": signature.should_alert,
        }
        if signature.test:
-            expected['test'] = signature.test
+            expected["test"] = signature.test
        if signature.has_subtests:
-            expected['has_subtests'] = True
+            expected["has_subtests"] = True
        if signature.tags:
            # tags stored as charField but api returns as list
-            expected['tags'] = signature.tags.split(' ')
+            expected["tags"] = signature.tags.split(" ")
        if signature.parent_signature:
-            expected['parent_signature'] = signature.parent_signature.signature_hash
+            expected["parent_signature"] = signature.parent_signature.signature_hash
        if signature.extra_options:
            # extra_options stored as charField but api returns as list
-            expected['extra_options'] = signature.extra_options.split(' ')
+            expected["extra_options"] = signature.extra_options.split(" ")
        if signature.measurement_unit:
-            expected['measurement_unit'] = signature.measurement_unit
+            expected["measurement_unit"] = signature.measurement_unit
        if signature.application:
-            expected['application'] = signature.application
+            expected["application"] = signature.application
        assert resp.data[signature.id] == expected


@@ -199,21 +199,21 @@ def test_filter_signatures_by_framework(

    # Filter by original framework
    resp = client.get(
-        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
-        + '?framework=%s' % test_perf_signature.framework.id,
+        reverse("performance-signatures-list", kwargs={"project": test_repository.name})
+        + "?framework=%s" % test_perf_signature.framework.id,
    )
    assert resp.status_code == 200
    assert len(resp.data.keys()) == 1
-    assert resp.data[test_perf_signature.id]['framework_id'] == test_perf_signature.framework.id
+    assert resp.data[test_perf_signature.id]["framework_id"] == test_perf_signature.framework.id

    # Filter by new framework
    resp = client.get(
-        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
-        + '?framework=%s' % signature2.framework.id,
+        reverse("performance-signatures-list", kwargs={"project": test_repository.name})
+        + "?framework=%s" % signature2.framework.id,
    )
    assert resp.status_code == 200
    assert len(resp.data.keys()) == 1
-    assert resp.data[signature2.id]['framework_id'] == signature2.framework.id
+    assert resp.data[signature2.id]["framework_id"] == signature2.framework.id


def test_filter_data_by_no_retriggers(
@@ -258,17 +258,17 @@ def test_filter_data_by_no_retriggers(
    )

    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signatures={}&no_retriggers=true'.format(test_perf_signature.signature_hash)
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signatures={}&no_retriggers=true".format(test_perf_signature.signature_hash)
    )
    assert resp.status_code == 200
    datums = resp.data[test_perf_signature.signature_hash]
    assert len(datums) == 2
-    assert set(datum['signature_id'] for datum in datums) == {
+    assert set(datum["signature_id"] for datum in datums) == {
        test_perf_signature.id,
        test_perf_signature_2.id,
    }
-    assert signature_for_retrigger_data.id not in set(datum['signature_id'] for datum in datums)
+    assert signature_for_retrigger_data.id not in set(datum["signature_id"] for datum in datums)


def test_filter_data_by_framework(
@@ -292,56 +292,56 @@ def test_filter_data_by_framework(
    # No filtering, return two datapoints (this behaviour actually sucks,
    # but it's "by design" for now, see bug 1265709)
    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signatures='
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signatures="
        + test_perf_signature.signature_hash
    )
    assert resp.status_code == 200
    datums = resp.data[test_perf_signature.signature_hash]
    assert len(datums) == 2
-    assert set(datum['signature_id'] for datum in datums) == {1, 2}
+    assert set(datum["signature_id"] for datum in datums) == {1, 2}

    # Filtering by first framework
    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signatures={}&framework={}'.format(
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signatures={}&framework={}".format(
            test_perf_signature.signature_hash, test_perf_signature.framework.id
        )
    )
    assert resp.status_code == 200
    datums = resp.data[test_perf_signature.signature_hash]
    assert len(datums) == 1
-    assert datums[0]['signature_id'] == 1
+    assert datums[0]["signature_id"] == 1

    # Filtering by second framework
    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signatures={}&framework={}'.format(
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signatures={}&framework={}".format(
            test_perf_signature.signature_hash, signature2.framework.id
        )
    )
    assert resp.status_code == 200
    datums = resp.data[test_perf_signature.signature_hash]
    assert len(datums) == 1
-    assert datums[0]['signature_id'] == 2
+    assert datums[0]["signature_id"] == 2


def test_filter_signatures_by_interval(client, test_perf_signature):
    # interval for the last 24 hours, only one signature exists last updated within that timeframe
    resp = client.get(
        reverse(
-            'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+            "performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
        )
-        + '?interval={}'.format(86400)
+        + "?interval={}".format(86400)
    )
    assert resp.status_code == 200
    assert len(resp.json().keys()) == 1
-    assert resp.json()[str(test_perf_signature.id)]['id'] == 1
+    assert resp.json()[str(test_perf_signature.id)]["id"] == 1


@pytest.mark.parametrize(
-    'start_date, end_date, exp_count, exp_id',
-    [(SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1), (THREE_DAYS_AGO, '', 1, 1), (ONE_DAY_AGO, '', 0, 0)],
+    "start_date, end_date, exp_count, exp_id",
+    [(SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1), (THREE_DAYS_AGO, "", 1, 1), (ONE_DAY_AGO, "", 0, 0)],
)
def test_filter_signatures_by_range(
    client, test_perf_signature, start_date, end_date, exp_count, exp_id
@@ -352,17 +352,17 @@ def test_filter_signatures_by_range(

    resp = client.get(
        reverse(
-            'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+            "performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
        )
-        + '?start_date={}&end_date={}'.format(start_date, end_date)
+        + "?start_date={}&end_date={}".format(start_date, end_date)
    )
    assert resp.status_code == 200
    assert len(resp.json().keys()) == exp_count
    if exp_count != 0:
-        assert resp.json()[str(test_perf_signature.id)]['id'] == exp_id
+        assert resp.json()[str(test_perf_signature.id)]["id"] == exp_id


-@pytest.mark.parametrize('interval, exp_push_ids', [(86400, {1}), (86400 * 3, {2, 1})])
+@pytest.mark.parametrize("interval, exp_push_ids", [(86400, {1}), (86400 * 3, {2, 1})])
def test_filter_data_by_interval(
    client, test_repository, test_perf_signature, interval, exp_push_ids
):
@@ -372,8 +372,8 @@ def test_filter_data_by_interval(
    ):
        push = Push.objects.create(
            repository=test_repository,
-            revision='abcdefgh%s' % i,
-            author='foo@bar.com',
+            revision="abcdefgh%s" % i,
+            author="foo@bar.com",
            time=timestamp,
        )
        PerformanceDatum.objects.create(
@@ -386,20 +386,20 @@ def test_filter_data_by_interval(

    # going back interval of 1 day, should find 1 item
    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signature_id={}&interval={}'.format(test_perf_signature.id, interval)
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signature_id={}&interval={}".format(test_perf_signature.id, interval)
    )

    assert resp.status_code == 200

    perf_data = resp.data[test_perf_signature.signature_hash]
-    push_ids = {datum['push_id'] for datum in perf_data}
+    push_ids = {datum["push_id"] for datum in perf_data}
    assert push_ids == exp_push_ids


@pytest.mark.parametrize(
-    'start_date, end_date, exp_push_ids',
-    [(SEVEN_DAYS_AGO, THREE_DAYS_AGO, {3}), (THREE_DAYS_AGO, '', {2, 1})],
+    "start_date, end_date, exp_push_ids",
+    [(SEVEN_DAYS_AGO, THREE_DAYS_AGO, {3}), (THREE_DAYS_AGO, "", {2, 1})],
)
def test_filter_data_by_range(
    client, test_repository, test_perf_signature, start_date, end_date, exp_push_ids
@@ -410,8 +410,8 @@ def test_filter_data_by_range(
    ):
        push = Push.objects.create(
            repository=test_repository,
-            revision='abcdefgh%s' % i,
-            author='foo@bar.com',
+            revision="abcdefgh%s" % i,
+            author="foo@bar.com",
            time=timestamp,
        )
        PerformanceDatum.objects.create(
@@ -423,8 +423,8 @@ def test_filter_data_by_range(
    )

    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name})
-        + '?signature_id={}&start_date={}&end_date={}'.format(
+        reverse("performance-data-list", kwargs={"project": test_repository.name})
+        + "?signature_id={}&start_date={}&end_date={}".format(
            test_perf_signature.id, start_date, end_date
        )
    )
@@ -432,18 +432,18 @@ def test_filter_data_by_range(
    assert resp.status_code == 200

    perf_data = resp.data[test_perf_signature.signature_hash]
-    push_ids = {datum['push_id'] for datum in perf_data}
+    push_ids = {datum["push_id"] for datum in perf_data}
    assert push_ids == exp_push_ids


def test_job_ids_validity(client, test_repository):
    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=1'
+        reverse("performance-data-list", kwargs={"project": test_repository.name}) + "?job_id=1"
    )
    assert resp.status_code == 200

    resp = client.get(
-        reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=foo'
+        reverse("performance-data-list", kwargs={"project": test_repository.name}) + "?job_id=foo"
    )
    assert resp.status_code == 400

@@ -452,7 +452,7 @@ def test_filter_data_by_signature(
    client, test_repository, test_perf_signature, summary_perf_signature
):
    push = Push.objects.create(
-        repository=test_repository, revision='abcdefghi', author='foo@bar.com', time=NOW
+        repository=test_repository, revision="abcdefghi", author="foo@bar.com", time=NOW
    )
    for i, signature in enumerate([test_perf_signature, summary_perf_signature]):
        PerformanceDatum.objects.create(
@@ -467,63 +467,63 @@ def test_filter_data_by_signature(
    # passing in signature_id and signature hash
    for i, signature in enumerate([test_perf_signature, summary_perf_signature]):
        for param, value in [
-            ('signatures', signature.signature_hash),
-            ('signature_id', signature.id),
+            ("signatures", signature.signature_hash),
+            ("signature_id", signature.id),
        ]:
            resp = client.get(
-                reverse('performance-data-list', kwargs={"project": test_repository.name})
-                + '?{}={}'.format(param, value)
+                reverse("performance-data-list", kwargs={"project": test_repository.name})
+                + "?{}={}".format(param, value)
            )
            assert resp.status_code == 200
            assert len(resp.data.keys()) == 1
            assert len(resp.data[signature.signature_hash]) == 1
-            assert resp.data[signature.signature_hash][0]['signature_id'] == signature.id
-            assert resp.data[signature.signature_hash][0]['value'] == float(i)
+            assert resp.data[signature.signature_hash][0]["signature_id"] == signature.id
+            assert resp.data[signature.signature_hash][0]["value"] == float(i)


def test_perf_summary(client, test_perf_signature, test_perf_data):
    query_params1 = (
-        '?repository={}&framework={}&interval=172800&no_subtests=true&revision={}'.format(
+        "?repository={}&framework={}&interval=172800&no_subtests=true&revision={}".format(
            test_perf_signature.repository.name,
            test_perf_signature.framework_id,
            test_perf_data[0].push.revision,
        )
    )

-    query_params2 = '?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29'.format(
+    query_params2 = "?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29".format(
        test_perf_signature.repository.name, test_perf_signature.framework_id
    )

    expected = [
        {
-            'signature_id': test_perf_signature.id,
-            'framework_id': test_perf_signature.framework_id,
-            'signature_hash': test_perf_signature.signature_hash,
-            'platform': test_perf_signature.platform.platform,
-            'test': test_perf_signature.test,
-            'application': test_perf_signature.application,
-            'lower_is_better': test_perf_signature.lower_is_better,
-            'has_subtests': test_perf_signature.has_subtests,
-            'tags': test_perf_signature.tags,
-            'measurement_unit': test_perf_signature.measurement_unit,
-            'values': [test_perf_data[0].value],
-            'name': 'mysuite mytest opt e10s opt',
-            'parent_signature': None,
-            'job_ids': [test_perf_data[0].job_id],
-            'suite': test_perf_signature.suite,
-            'repository_name': test_perf_signature.repository.name,
-            'repository_id': test_perf_signature.repository.id,
-            'data': [],
+            "signature_id": test_perf_signature.id,
+            "framework_id": test_perf_signature.framework_id,
+            "signature_hash": test_perf_signature.signature_hash,
+            "platform": test_perf_signature.platform.platform,
+            "test": test_perf_signature.test,
+            "application": test_perf_signature.application,
+            "lower_is_better": test_perf_signature.lower_is_better,
+            "has_subtests": test_perf_signature.has_subtests,
+            "tags": test_perf_signature.tags,
+            "measurement_unit": test_perf_signature.measurement_unit,
+            "values": [test_perf_data[0].value],
+            "name": "mysuite mytest opt e10s opt",
+            "parent_signature": None,
+            "job_ids": [test_perf_data[0].job_id],
+            "suite": test_perf_signature.suite,
+            "repository_name": test_perf_signature.repository.name,
+            "repository_id": test_perf_signature.repository.id,
+            "data": [],
        }
    ]

-    resp1 = client.get(reverse('performance-summary') + query_params1)
+    resp1 = client.get(reverse("performance-summary") + query_params1)
    assert resp1.status_code == 200
    assert resp1.json() == expected

-    expected[0]['values'] = [item.value for item in test_perf_data]
-    expected[0]['job_ids'] = [item.job_id for item in test_perf_data]
-    resp2 = client.get(reverse('performance-summary') + query_params2)
+    expected[0]["values"] = [item.value for item in test_perf_data]
+    expected[0]["job_ids"] = [item.job_id for item in test_perf_data]
+    resp2 = client.get(reverse("performance-summary") + query_params2)
    assert resp2.status_code == 200
    assert resp2.json() == expected


@@ -539,14 +539,14 @@ def test_data_points_from_same_push_are_ordered_chronologically(
    As job ids are auto incremented, older jobs have smaller ids than newer ones.
    Thus, these ids are sufficient to check for chronological order.
    """
-    query_params = '?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29'.format(
+    query_params = "?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29".format(
        test_perf_signature.repository.name, test_perf_signature.framework_id
    )

-    response = client.get(reverse('performance-summary') + query_params)
+    response = client.get(reverse("performance-summary") + query_params)
    assert response.status_code == 200

-    job_ids = response.json()[0]['job_ids']
+    job_ids = response.json()[0]["job_ids"]
    assert job_ids == sorted(job_ids)


@@ -554,7 +554,7 @@ def test_no_retriggers_perf_summary(
    client, push_stored, test_perf_signature, test_perf_signature_2, test_perf_data
):
    push = Push.objects.get(id=1)
-    query_params = '?repository={}&framework={}&no_subtests=true&revision={}&all_data=true&signature={}'.format(
+    query_params = "?repository={}&framework={}&no_subtests=true&revision={}&all_data=true&signature={}".format(
        test_perf_signature.repository.name,
        test_perf_signature.framework_id,
        push.revision,
@@ -577,15 +577,15 @@ def test_no_retriggers_perf_summary(
        push_timestamp=push.time,
    )

-    response = client.get(reverse('performance-summary') + query_params)
+    response = client.get(reverse("performance-summary") + query_params)
    content = response.json()
    assert response.status_code == 200
-    assert len(content[0]['data']) == 2
+    assert len(content[0]["data"]) == 2

-    response = client.get(reverse('performance-summary') + query_params + "&no_retriggers=true")
+    response = client.get(reverse("performance-summary") + query_params + "&no_retriggers=true")
    content = response.json()
    assert response.status_code == 200
-    assert len(content[0]['data']) == 1
+    assert len(content[0]["data"]) == 1


def test_filter_out_retriggers():
@@ -662,12 +662,12 @@ def test_filter_out_retriggers():
    filtered_data = PerformanceSummary._filter_out_retriggers(copy.deepcopy(input_data))
    for perf_summary in filtered_data:
        push_id_count = defaultdict(int)
-        for idx, datum in enumerate(perf_summary['data']):
-            push_id_count[datum['push_id']] += 1
+        for idx, datum in enumerate(perf_summary["data"]):
+            push_id_count[datum["push_id"]] += 1
        for push_id in push_id_count:
            assert push_id_count[push_id] == 1

-    assert len(filtered_data[0]['data']) == 3
+    assert len(filtered_data[0]["data"]) == 3

    no_retriggers_data = [
        {
@@ -719,7 +719,7 @@ def test_alert_summary_tasks_get(client, test_perf_alert_summary, test_perf_data
        status=PerformanceAlert.REASSIGNED,
    )
    resp = client.get(
-        reverse('performance-alertsummary-tasks') + '?id={}'.format(test_perf_alert_summary.id)
+        reverse("performance-alertsummary-tasks") + "?id={}".format(test_perf_alert_summary.id)
    )
    assert resp.status_code == 200
    assert resp.json() == {
@@ -738,12 +738,12 @@ def test_alert_summary_tasks_get_failure(client, test_perf_alert_summary):
    not_exist_summary_id = test_perf_alert_summary.id
    test_perf_alert_summary.delete()
    resp = client.get(
-        reverse('performance-alertsummary-tasks') + '?id={}'.format(not_exist_summary_id)
+        reverse("performance-alertsummary-tasks") + "?id={}".format(not_exist_summary_id)
    )
    assert resp.status_code == 400
    assert resp.json() == {"message": ["PerformanceAlertSummary does not exist."]}

    # verify that we fail if id does not exist as a query parameter
-    resp = client.get(reverse('performance-alertsummary-tasks'))
+    resp = client.get(reverse("performance-alertsummary-tasks"))
    assert resp.status_code == 400
    assert resp.json() == {"id": ["This field is required."]}

@@ -2,13 +2,13 @@ from django.urls import reverse


def test_perf_tags_get(authorized_sheriff_client, test_perf_tag, test_perf_tag_2):
-    resp = authorized_sheriff_client.get(reverse('performance-tags-list'))
+    resp = authorized_sheriff_client.get(reverse("performance-tags-list"))
    assert resp.status_code == 200

    assert len(resp.json()) == 2

-    assert resp.json()[0]['id'] == test_perf_tag.id
-    assert resp.json()[0]['name'] == test_perf_tag.name
+    assert resp.json()[0]["id"] == test_perf_tag.id
+    assert resp.json()[0]["name"] == test_perf_tag.name

-    assert resp.json()[1]['id'] == test_perf_tag_2.id
-    assert resp.json()[1]['name'] == test_perf_tag_2.name
+    assert resp.json()[1]["id"] == test_perf_tag_2.id
+    assert resp.json()[1]["name"] == test_perf_tag_2.name

@@ -16,8 +16,8 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
    """
    resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
    data = resp.json()
-    results = data['results']
-    meta = data['meta']
+    results = data["results"]
+    meta = data["meta"]

    assert resp.status_code == 200
    assert isinstance(results, list)
@@ -25,19 +25,19 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
    assert len(results) == 10
    exp_keys = set(
        [
-            u'id',
-            u'repository_id',
-            u'author',
-            u'revision',
-            u'revisions',
-            u'revision_count',
-            u'push_timestamp',
+            "id",
+            "repository_id",
+            "author",
+            "revision",
+            "revisions",
+            "revision_count",
+            "push_timestamp",
        ]
    )
    for rs in results:
        assert set(rs.keys()) == exp_keys

-    assert meta == {u'count': 10, u'filter_params': {}, u'repository': test_repository.name}
+    assert meta == {"count": 10, "filter_params": {}, "repository": test_repository.name}


def test_push_list_bad_project(client, transactional_db):
@@ -63,7 +63,7 @@ def test_push_list_empty_push_still_show(client, sample_push, test_repository):
    )
    assert resp.status_code == 200
    data = resp.json()
-    assert len(data['results']) == 10
+    assert len(data["results"]) == 10


def test_push_list_single_short_revision(client, eleven_jobs_stored, test_repository):
@@ -75,15 +75,15 @@ def test_push_list_single_short_revision(client, eleven_jobs_stored, test_reposi
        reverse("push-list", kwargs={"project": test_repository.name}), {"revision": "45f8637cb9f7"}
    )
    assert resp.status_code == 200
-    results = resp.json()['results']
-    meta = resp.json()['meta']
+    results = resp.json()["results"]
+    meta = resp.json()["meta"]
    assert len(results) == 1
    assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
    assert meta == {
-        u'count': 1,
-        u'revision': u'45f8637cb9f7',
-        u'filter_params': {u'revisions_short_revision': "45f8637cb9f7"},
-        u'repository': test_repository.name,
+        "count": 1,
+        "revision": "45f8637cb9f7",
+        "filter_params": {"revisions_short_revision": "45f8637cb9f7"},
+        "repository": test_repository.name,
    }


@@ -97,15 +97,15 @@ def test_push_list_single_long_revision(client, eleven_jobs_stored, test_reposit
        {"revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"},
    )
    assert resp.status_code == 200
-    results = resp.json()['results']
-    meta = resp.json()['meta']
+    results = resp.json()["results"]
+    meta = resp.json()["meta"]
    assert len(results) == 1
    assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
    assert meta == {
-        u'count': 1,
-        u'revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf',
-        u'filter_params': {u'revisions_long_revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf'},
-        u'repository': test_repository.name,
+        "count": 1,
+        "revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf",
+        "filter_params": {"revisions_long_revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"},
+        "repository": test_repository.name,
    }


@@ -121,21 +121,21 @@ def test_push_list_filter_by_revision(client, eleven_jobs_stored, test_repositor
    )
    assert resp.status_code == 200
    data = resp.json()
-    results = data['results']
-    meta = data['meta']
+    results = data["results"]
+    meta = data["meta"]
    assert len(results) == 4
    assert set([rs["revision"] for rs in results]) == {
-        u'130965d3df6c9a1093b4725f3b877eaef80d72bc',
-        u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
-        u'a69390334818373e2d7e6e9c8d626a328ed37d47',
-        u'f361dcb60bbedaa01257fbca211452972f7a74b2',
+        "130965d3df6c9a1093b4725f3b877eaef80d72bc",
+        "7f417c3505e3d2599ac9540f02e3dbee307a3963",
+        "a69390334818373e2d7e6e9c8d626a328ed37d47",
+        "f361dcb60bbedaa01257fbca211452972f7a74b2",
    }
    assert meta == {
-        u'count': 4,
-        u'fromchange': u'130965d3df6c',
-        u'filter_params': {u'push_timestamp__gte': 1384363842, u'push_timestamp__lte': 1384365942},
-        u'repository': test_repository.name,
-        u'tochange': u'f361dcb60bbe',
+        "count": 4,
+        "fromchange": "130965d3df6c",
+        "filter_params": {"push_timestamp__gte": 1384363842, "push_timestamp__lte": 1384365942},
+        "repository": test_repository.name,
+        "tochange": "f361dcb60bbe",
    }


@@ -147,7 +147,7 @@ def test_push_list_filter_by_date(client, test_repository, sample_push):
    for i, datestr in zip(
        [3, 4, 5, 6, 7], ["2013-08-09", "2013-08-10", "2013-08-11", "2013-08-12", "2013-08-13"]
    ):
-        sample_push[i]['push_timestamp'] = utils.to_timestamp(utils.to_datetime(datestr))
+        sample_push[i]["push_timestamp"] = utils.to_timestamp(utils.to_datetime(datestr))

    store_push_data(test_repository, sample_push)

@@ -157,35 +157,35 @@ def test_push_list_filter_by_date(client, test_repository, sample_push):
    )
    assert resp.status_code == 200
    data = resp.json()
-    results = data['results']
-    meta = data['meta']
+    results = data["results"]
+    meta = data["meta"]
    assert len(results) == 4
    assert set([rs["revision"] for rs in results]) == {
-        u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
-        u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
-        u'a69390334818373e2d7e6e9c8d626a328ed37d47',
-        u'f361dcb60bbedaa01257fbca211452972f7a74b2',
+        "ce17cad5d554cfffddee13d1d8421ae9ec5aad82",
+        "7f417c3505e3d2599ac9540f02e3dbee307a3963",
+        "a69390334818373e2d7e6e9c8d626a328ed37d47",
+        "f361dcb60bbedaa01257fbca211452972f7a74b2",
    }
    assert meta == {
-        u'count': 4,
-        u'enddate': u'2013-08-13',
-        u'filter_params': {
-            u'push_timestamp__gte': 1376092800.0,
-            u'push_timestamp__lt': 1376438400.0,
+        "count": 4,
+        "enddate": "2013-08-13",
+        "filter_params": {
+            "push_timestamp__gte": 1376092800.0,
+            "push_timestamp__lt": 1376438400.0,
        },
-        u'repository': test_repository.name,
-        u'startdate': u'2013-08-10',
+        "repository": test_repository.name,
+        "startdate": "2013-08-10",
    }


@pytest.mark.parametrize(
-    'filter_param, exp_ids',
+    "filter_param, exp_ids",
    [
-        ('id__lt=2', [1]),
-        ('id__lte=2', [1, 2]),
-        ('id=2', [2]),
-        ('id__gt=2', [3]),
-        ('id__gte=2', [2, 3]),
+        ("id__lt=2", [1]),
+        ("id__lte=2", [1, 2]),
+        ("id=2", [2]),
+        ("id__gt=2", [3]),
+        ("id__gte=2", [2, 3]),
    ],
)
def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
@@ -193,9 +193,9 @@ def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
    test filtering by id in various ways
    """
    for revision, author in [
-        ('1234abcd', 'foo@bar.com'),
-        ('2234abcd', 'foo2@bar.com'),
-        ('3234abcd', 'foo3@bar.com'),
+        ("1234abcd", "foo@bar.com"),
+        ("2234abcd", "foo2@bar.com"),
+        ("3234abcd", "foo3@bar.com"),
    ]:
        Push.objects.create(
            repository=test_repository,
@@ -204,11 +204,11 @@ def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
            time=datetime.datetime.now(),
        )
    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?' + filter_param
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?" + filter_param
    )
    assert resp.status_code == 200
-    results = resp.json()['results']
-    assert set([result['id'] for result in results]) == set(exp_ids)
+    results = resp.json()["results"]
+    assert set([result["id"] for result in results]) == set(exp_ids)


def test_push_list_id_in(client, test_repository):
@@ -216,9 +216,9 @@ def test_push_list_id_in(client, test_repository):
    test the id__in parameter
    """
    for revision, author in [
-        ('1234abcd', 'foo@bar.com'),
-        ('2234abcd', 'foo2@bar.com'),
-        ('3234abcd', 'foo3@bar.com'),
+        ("1234abcd", "foo@bar.com"),
+        ("2234abcd", "foo2@bar.com"),
+        ("3234abcd", "foo3@bar.com"),
    ]:
        Push.objects.create(
            repository=test_repository,
@@ -227,17 +227,17 @@ def test_push_list_id_in(client, test_repository):
            time=datetime.datetime.now(),
        )
    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2'
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?id__in=1,2"
    )
    assert resp.status_code == 200

-    results = resp.json()['results']
+    results = resp.json()["results"]
    assert len(results) == 2  # would have 3 if filter not applied
-    assert set([result['id'] for result in results]) == set([1, 2])
+    assert set([result["id"] for result in results]) == set([1, 2])

    # test that we do something sane if invalid list passed in
    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2,foobar',
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?id__in=1,2,foobar",
    )
    assert resp.status_code == 400

@@ -249,11 +249,11 @@ def test_push_list_bad_count(client, test_repository):
    bad_count = "ZAP%n%s%n%s"

    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}), data={'count': bad_count}
+        reverse("push-list", kwargs={"project": test_repository.name}), data={"count": bad_count}
    )

    assert resp.status_code == 400
-    assert resp.json() == {'detail': 'Valid count value required'}
+    assert resp.json() == {"detail": "Valid count value required"}


def test_push_author(client, test_repository):
@@ -261,9 +261,9 @@ def test_push_author(client, test_repository):
    test the author parameter
    """
    for revision, author in [
-        ('1234abcd', 'foo@bar.com'),
-        ('2234abcd', 'foo@bar.com'),
-        ('3234abcd', 'foo2@bar.com'),
+        ("1234abcd", "foo@bar.com"),
+        ("2234abcd", "foo@bar.com"),
+        ("3234abcd", "foo2@bar.com"),
    ]:
        Push.objects.create(
            repository=test_repository,
@@ -273,31 +273,31 @@ def test_push_author(client, test_repository):
        )

    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo@bar.com'
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?author=foo@bar.com"
    )
    assert resp.status_code == 200

-    results = resp.json()['results']
+    results = resp.json()["results"]
    assert len(results) == 2  # would have 3 if filter not applied
-    assert set([result['id'] for result in results]) == set([1, 2])
+    assert set([result["id"] for result in results]) == set([1, 2])

    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo2@bar.com'
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?author=foo2@bar.com"
    )
    assert resp.status_code == 200

-    results = resp.json()['results']
+    results = resp.json()["results"]
    assert len(results) == 1  # would have 3 if filter not applied
-    assert results[0]['id'] == 3
+    assert results[0]["id"] == 3

    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) + '?author=-foo2@bar.com'
+        reverse("push-list", kwargs={"project": test_repository.name}) + "?author=-foo2@bar.com"
    )
    assert resp.status_code == 200

-    results = resp.json()['results']
+    results = resp.json()["results"]
    assert len(results) == 2  # would have 3 if filter not applied
-    assert set([result['id'] for result in results]) == set([1, 2])
+    assert set([result["id"] for result in results]) == set([1, 2])


def test_push_reviewbot(client, test_repository):
@@ -305,10 +305,10 @@ def test_push_reviewbot(client, test_repository):
    test the reviewbot parameter
    """
    for revision, author in [
-        ('1234abcd', 'foo@bar.com'),
-        ('2234abcd', 'foo2@bar.com'),
-        ('3234abcd', 'reviewbot'),
-        ('4234abcd', 'reviewbot'),
+        ("1234abcd", "foo@bar.com"),
+        ("2234abcd", "foo2@bar.com"),
+        ("3234abcd", "reviewbot"),
+        ("4234abcd", "reviewbot"),
    ]:
        Push.objects.create(
            repository=test_repository,
@@ -319,13 +319,13 @@ def test_push_reviewbot(client, test_repository):

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name})
-        + '?hide_reviewbot_pushes=true'
+        + "?hide_reviewbot_pushes=true"
    )
    assert resp.status_code == 200

-    results = resp.json()['results']
+    results = resp.json()["results"]
    assert len(results) == 2
-    assert set([result['id'] for result in results]) == set([1, 2])
+    assert set([result["id"] for result in results]) == set([1, 2])


def test_push_list_without_jobs(client, test_repository, sample_push):
@@ -337,16 +337,16 @@ def test_push_list_without_jobs(client, test_repository, sample_push):
    resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
    assert resp.status_code == 200
    data = resp.json()
-    results = data['results']
+    results = data["results"]
    assert len(results) == 10
-    assert all([('platforms' not in result) for result in results])
+    assert all([("platforms" not in result) for result in results])

-    meta = data['meta']
+    meta = data["meta"]

    assert meta == {
-        u'count': len(results),
-        u'filter_params': {},
-        u'repository': test_repository.name,
+        "count": len(results),
+        "filter_params": {},
+        "repository": test_repository.name,
    }


@@ -400,13 +400,13 @@ def test_push_status(client, test_job, test_user):
    )
    assert resp.status_code == 200
    assert isinstance(resp.json(), dict)
-    assert resp.json() == {'success': 1, 'completed': 1, 'pending': 0, 'running': 0}
+    assert resp.json() == {"success": 1, "completed": 1, "pending": 0, "running": 0}

    JobNote.objects.create(
        job=test_job,
        failure_classification=failure_classification,
        user=test_user,
-        text='A random note',
+        text="A random note",
    )

    resp = client.get(
@@ -414,4 +414,4 @@ def test_push_status(client, test_job, test_user):
    )
    assert resp.status_code == 200
    assert isinstance(resp.json(), dict)
-    assert resp.json() == {'completed': 0, 'pending': 0, 'running': 0}
+    assert resp.json() == {"completed": 0, "pending": 0, "running": 0}

@@ -7,7 +7,7 @@ from rest_framework.test import APIRequestFactory

class RequestVersionView(APIView):
    def get(self, request, *args, **kwargs):
-        return Response({'version': request.version})
+        return Response({"version": request.version})


factory = APIRequestFactory()
@@ -15,25 +15,25 @@ factory = APIRequestFactory()

def test_unsupported_version():
    view = RequestVersionView.as_view()
-    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=foo.bar')
+    request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version=foo.bar")
    try:
        response = view(request)
    except NotAcceptable:
        pass
-    assert response.data == {u'detail': u'Invalid version in "Accept" header.'}
+    assert response.data == {"detail": 'Invalid version in "Accept" header.'}


def test_correct_version():
    view = RequestVersionView.as_view()
-    version = settings.REST_FRAMEWORK['ALLOWED_VERSIONS'][0]
-    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version={0}'.format(version))
+    version = settings.REST_FRAMEWORK["ALLOWED_VERSIONS"][0]
+    request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version={0}".format(version))
    response = view(request)
-    assert response.data == {'version': version}
+    assert response.data == {"version": version}


def test_default_version():
    view = RequestVersionView.as_view()
-    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
+    request = factory.get("/endpoint/", HTTP_ACCEPT="application/json")
    response = view(request)
-    version = settings.REST_FRAMEWORK['DEFAULT_VERSION']
-    assert response.data == {'version': version}
+    version = settings.REST_FRAMEWORK["DEFAULT_VERSION"]
+    assert response.data == {"version": version}

@@ -2,4 +2,4 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app

-__all__ = ('celery_app',)
+__all__ = ("celery_app",)