Mirror of https://github.com/mozilla/treeherder.git
N806: variables in functions should be lowercase
This commit is contained in:
Parent: 3d812c391c
Commit: 642cff92bf
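Reviewer note: ruff's pep8-naming rule N806 ("variable in function should be lowercase") only fires on assignments inside function bodies, which is why this commit renames locals such as taskId and INTERVAL while leaving module-level constants untouched. A minimal, hypothetical sketch (not taken from this diff) of what the rule flags:

def print_task_url(job_guid):
    taskId = job_guid.split("/")[0]  # N806: mixedCase local variable
    TASK_URL = f"https://firefox-ci-tc.services.mozilla.com/tasks/{taskId}"  # N806: UPPER_CASE local
    print(TASK_URL)

def print_task_url_fixed(job_guid):
    task_id = job_guid.split("/")[0]  # snake_case local: compliant
    task_url = f"https://firefox-ci-tc.services.mozilla.com/tasks/{task_id}"
    print(task_url)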
@@ -53,8 +53,8 @@ def print_url_to_taskcluster(job_guid):
     job_guid = job["job_guid"]
     (decoded_task_id, _) = job_guid.split("/")
     # As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
-    taskId = slugid.encode(uuid.UUID(decoded_task_id))
-    logger.info("https://firefox-ci-tc.services.mozilla.com/tasks/%s", taskId)
+    task_id = slugid.encode(uuid.UUID(decoded_task_id))
+    logger.info("https://firefox-ci-tc.services.mozilla.com/tasks/%s", task_id)


 if __name__ == "__main__":
@@ -95,13 +95,13 @@ if __name__ == "__main__":
             th_instance_not_found.append(job)
         else:
             # You can use this value in a url with &selectedJob=
-            jobId = job["id"]
+            job_id = job["id"]
             remove_some_attributes(job, production_job)

             differences = DeepDiff(job, production_dict[job["job_guid"]])
             if differences:
                 pprint.pprint(differences)
-                logger.info(jobId)
+                logger.info(job_id)
             else:
                 # Delete jobs that don"t have any differences
                 del production_dict[job["job_guid"]]
@@ -39,6 +39,8 @@ select = [
     "F",
     # pyupgrade
     "UP",
+    # pep-naming
+    "N806",
 ]

 ignore = [
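The hunk above opts the project into the pep8-naming check by adding "N806" to ruff's select list (run locally with `ruff check .`, assuming ruff is installed). The rule deliberately leaves module-level names alone, which matches the pattern throughout this commit: constants such as CSP_DIRECTIVES stay UPPER_CASE while function locals are lowered. A hypothetical sketch of the distinction:

MAX_PUSH_COUNT = 1000  # module-level constant: N806 does not apply here

def clamp_count(count):
    HARD_LIMIT = 2000  # function-local UPPER_CASE: N806 flags this
    return min(count, HARD_LIMIT, MAX_PUSH_COUNT)

def clamp_count_fixed(count):
    hard_limit = 2000  # compliant snake_case local
    return min(count, hard_limit, MAX_PUSH_COUNT)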
@@ -48,7 +48,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,

     import json

-    EXPECTED_REOPEN_ATTEMPTS = {
+    expected_reopen_attempts = {
         "https://thisisnotbugzilla.org/rest/bug/202": json.dumps(
             {
                 "status": "REOPENED",
@@ -68,4 +68,4 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
             }
         ),
     }
-    assert reopened_bugs == EXPECTED_REOPEN_ATTEMPTS
+    assert reopened_bugs == expected_reopen_attempts
@@ -38,12 +38,12 @@ def transformed_pulse_jobs(sample_data, test_repository):

 def mock_artifact(taskId, runId, artifactName):
     # Mock artifact with empty body
-    baseUrl = (
+    base_url = (
         "https://taskcluster.net/api/queue/v1/task/{taskId}/runs/{runId}/artifacts/{artifactName}"
     )
     responses.add(
         responses.GET,
-        baseUrl.format(taskId=taskId, runId=runId, artifactName=artifactName),
+        base_url.format(taskId=taskId, runId=runId, artifactName=artifactName),
         body="",
         content_type="text/plain",
         status=200,
@@ -53,20 +53,20 @@ def mock_artifact(taskId, runId, artifactName):
 @pytest.fixture
 async def new_pulse_jobs(sample_data, test_repository, push_stored):
     revision = push_stored[0]["revisions"][0]["revision"]
-    pulseMessages = copy.deepcopy(sample_data.taskcluster_pulse_messages)
+    pulse_messages = copy.deepcopy(sample_data.taskcluster_pulse_messages)
     tasks = copy.deepcopy(sample_data.taskcluster_tasks)
     jobs = []
     # Over here we transform the Pulse messages into the intermediary taskcluster-treeherder
     # generated messages
-    for message in list(pulseMessages.values()):
-        taskId = message["payload"]["status"]["taskId"]
-        task = tasks[taskId]
+    for message in list(pulse_messages.values()):
+        task_id = message["payload"]["status"]["taskId"]
+        task = tasks[task_id]

         # If we pass task to handleMessage we won't hit the network
-        taskRuns = await handleMessage(message, task)
+        task_runs = await handleMessage(message, task)
         # handleMessage returns [] when it is a task that is not meant for Treeherder
-        for run in reversed(taskRuns):
-            mock_artifact(taskId, run["retryId"], "public/logs/live_backing.log")
+        for run in reversed(task_runs):
+            mock_artifact(task_id, run["retryId"], "public/logs/live_backing.log")
             run["origin"]["project"] = test_repository.name
             run["origin"]["revision"] = revision
             jobs.append(run)
@@ -99,11 +99,11 @@ def test_new_job_transformation(new_pulse_jobs, new_transformed_jobs, failure_cl
         job_guid = message["taskId"]
         (decoded_task_id, _) = job_guid.split("/")
         # As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
-        taskId = slugid.encode(uuid.UUID(decoded_task_id))
+        task_id = slugid.encode(uuid.UUID(decoded_task_id))
         transformed_job = jl.process_job(message, "https://firefox-ci-tc.services.mozilla.com")
         # Not all messages from Taskcluster will be processed
         if transformed_job:
-            assert new_transformed_jobs[taskId] == transformed_job
+            assert new_transformed_jobs[task_id] == transformed_job


 def test_ingest_pulse_jobs(
@@ -78,8 +78,8 @@ def test_smaller_than_bigger():

 def test_extra_option_max_length():
     with open(os.path.join("schemas", "performance-artifact.json")) as f:
-        PERFHERDER_SCHEMA = json.load(f)
-    assert 100 == _lookup_extra_options_max(PERFHERDER_SCHEMA)
+        perfherder_schema = json.load(f)
+    assert 100 == _lookup_extra_options_max(perfherder_schema)


 def test_validate_perf_schema_no_exception():
@@ -190,20 +190,20 @@ def test_import(mock_bugscache_bugzilla_request):
     assert bug.dupe_of == 1662628

     # key: open bug, values: duplicates
-    EXPECTED_BUG_DUPE_OF_DATA = {
+    expected_bug_dupe_of_data = {
         1392106: [1442991, 1443801],
         1411358: [1204281],
         1662628: [1652208, 1660324, 1660719, 1660765, 1663081, 1663118, 1702255],
         1736534: [],
     }

-    for open_bug, duplicates in EXPECTED_BUG_DUPE_OF_DATA.items():
+    for open_bug, duplicates in expected_bug_dupe_of_data.items():
         assert Bugscache.objects.get(id=open_bug).dupe_of is None
         assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list("id", flat=True)) == set(
             duplicates
         )

-    EXPECTED_BUG_COUNT = sum(
-        [1 + len(duplicates) for duplicates in EXPECTED_BUG_DUPE_OF_DATA.values()]
+    expected_bug_count = sum(
+        [1 + len(duplicates) for duplicates in expected_bug_dupe_of_data.values()]
     )
-    assert len(Bugscache.objects.all()) == EXPECTED_BUG_COUNT
+    assert len(Bugscache.objects.all()) == expected_bug_count
@@ -40,7 +40,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
     import_process.run()
     assert FilesBugzillaMap.objects.count() == 7

-    EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 = [
+    expected_files_bugzilla_data_import_1 = [
         ("AUTHORS", "AUTHORS", "mozilla.org", "Licensing"),
         ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
         (
@@ -74,7 +74,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
             "File first seen on mozilla-beta",
         ),
     ]
-    assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 == list(
+    assert expected_files_bugzilla_data_import_1 == list(
         FilesBugzillaMap.objects.all()
         .values_list(
             "path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
@@ -82,7 +82,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
         .order_by("path")
     )

-    EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 = [
+    expected_bugzilla_components_import_1 = [
         ("Core", "Storage: IndexedDB"),
         ("Firefox", "General"),
         ("Mock", "File first seen on mozilla-beta"),
@@ -91,7 +91,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
         ("mozilla.org", "Different path, same product, different component"),
         ("mozilla.org", "Licensing"),
     ]
-    assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 == sorted(
+    assert expected_bugzilla_components_import_1 == sorted(
         list(
             BugzillaComponent.objects.all()
             .values_list("product", "component")
@@ -103,7 +103,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
     import_process.run()
     assert FilesBugzillaMap.objects.count() == 6

-    EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 = [
+    expected_files_bugzilla_data_import_2 = [
         ("AUTHORS", "AUTHORS", "mozilla.org", "Import 2: same product, different component"),
         ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
         (
@@ -131,7 +131,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
             "File first seen on mozilla-beta",
         ),
     ]
-    assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 == sorted(
+    assert expected_files_bugzilla_data_import_2 == sorted(
         list(
             FilesBugzillaMap.objects.all()
             .values_list(
@@ -141,7 +141,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
         )
     )

-    EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 = [
+    expected_bugzilla_components_import_2 = [
         ("Core", "Storage: IndexedDB"),
         ("Core", "Storage: IndexedDB2"),
         ("Firefox", "General"),
@@ -149,7 +149,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
         ("Testing", "web-platform-tests"),
         ("mozilla.org", "Import 2: same product, different component"),
     ]
-    assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 == sorted(
+    assert expected_bugzilla_components_import_2 == sorted(
         list(
             BugzillaComponent.objects.all()
             .values_list("product", "component")
@@ -132,42 +132,42 @@ def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_reco
 def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)

-    test_moniker_A = ("build_metrics", "build times")
-    test_moniker_B = ("talos", "tp5n", "nonmain_startup_fileio")
+    test_moniker_a = ("build_metrics", "build times")
+    test_moniker_b = ("talos", "tp5n", "nonmain_startup_fileio")

-    cassette_preffix_A = "-".join(filter(None, test_moniker_A))
-    cassette_preffix_B = "-".join(filter(None, test_moniker_B))
+    cassette_preffix_a = "-".join(filter(None, test_moniker_a))
+    cassette_preffix_b = "-".join(filter(None, test_moniker_b))

-    with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
-        formula(*test_moniker_A)  # let it perform calculus & cache breakdown
-        breakdown_A = formula.breakdown()
+    with betamax_recorder.use_cassette(f"{cassette_preffix_a}", serialize_with="prettyjson"):
+        formula(*test_moniker_a)  # let it perform calculus & cache breakdown
+        breakdown_a = formula.breakdown()

-    with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
-        formula(*test_moniker_B)  # let it perform calculus & cache breakdown
-        breakdown_B = formula.breakdown()
+    with betamax_recorder.use_cassette(f"{cassette_preffix_b}", serialize_with="prettyjson"):
+        formula(*test_moniker_b)  # let it perform calculus & cache breakdown
+        breakdown_b = formula.breakdown()

-    assert breakdown_A != breakdown_B
+    assert breakdown_a != breakdown_b


 @pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
 def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax_recorder):
     formula = FormulaClass(betamax_recorder.session)

-    test_moniker_A = ("build_metrics", "build times")
-    test_moniker_B = ("nonexistent_framework", "nonexistent_suite")
+    test_moniker_a = ("build_metrics", "build times")
+    test_moniker_b = ("nonexistent_framework", "nonexistent_suite")

-    cassette_preffix_A = "-".join(filter(None, test_moniker_A))
-    cassette_preffix_B = "-".join(filter(None, test_moniker_B))
+    cassette_preffix_a = "-".join(filter(None, test_moniker_a))
+    cassette_preffix_b = "-".join(filter(None, test_moniker_b))

     # run happy path calculus
-    with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
-        formula(*test_moniker_A)  # let it perform calculus & cache breakdown
+    with betamax_recorder.use_cassette(f"{cassette_preffix_a}", serialize_with="prettyjson"):
+        formula(*test_moniker_a)  # let it perform calculus & cache breakdown
         _ = formula.breakdown()

     # now run alternated path calculus
-    with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
+    with betamax_recorder.use_cassette(f"{cassette_preffix_b}", serialize_with="prettyjson"):
         with pytest.raises(NoFiledBugs):
-            formula(*test_moniker_B)  # intentionally blows up while doing calculus
+            formula(*test_moniker_b)  # intentionally blows up while doing calculus

     # cached breakdown got invalidated & can no longer be obtained
     with pytest.raises(RuntimeError):
@@ -83,22 +83,22 @@ def test_detect_alerts_in_series(
     mock_deviance,
 ):
     base_time = time.time()  # generate it based off current time
-    INTERVAL = 30
+    interval = 30
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
         1,
         0.5,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
-        int(INTERVAL / 2) + 1,
+        int(interval / 2) + 1,
         1.0,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )

     generate_new_alerts_in_series(test_perf_signature)
@@ -107,8 +107,8 @@ def test_detect_alerts_in_series(
     assert PerformanceAlertSummary.objects.count() == 1
     _verify_alert(
         1,
-        (INTERVAL / 2) + 1,
-        (INTERVAL / 2),
+        (interval / 2) + 1,
+        (interval / 2),
         test_perf_signature,
         0.5,
         1.0,
@@ -125,8 +125,8 @@ def test_detect_alerts_in_series(
     assert PerformanceAlertSummary.objects.count() == 1
     _verify_alert(
         1,
-        (INTERVAL / 2) + 1,
-        (INTERVAL / 2),
+        (interval / 2) + 1,
+        (interval / 2),
         test_perf_signature,
         0.5,
         1.0,
@@ -142,9 +142,9 @@ def test_detect_alerts_in_series(
         test_repository,
         test_perf_signature,
         base_time,
-        (INTERVAL + 1),
+        (interval + 1),
         2.0,
-        INTERVAL,
+        interval,
     )
     generate_new_alerts_in_series(test_perf_signature)

@@ -152,8 +152,8 @@ def test_detect_alerts_in_series(
     assert PerformanceAlertSummary.objects.count() == 2
     _verify_alert(
         2,
-        INTERVAL + 1,
-        INTERVAL,
+        interval + 1,
+        interval,
         test_perf_signature,
         1.0,
         2.0,
@@ -232,22 +232,22 @@ def test_no_alerts_with_old_data(
     test_perf_signature,
 ):
     base_time = 0  # 1970, too old!
-    INTERVAL = 30
+    interval = 30
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
         1,
         0.5,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
-        int(INTERVAL / 2) + 1,
+        int(interval / 2) + 1,
         1.0,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )

     generate_new_alerts_in_series(test_perf_signature)
@@ -269,7 +269,7 @@ def test_custom_alert_threshold(
     # under default settings, this set of data would generate
     # 2 alerts, but we'll set an artificially high threshold
     # of 200% that should only generate 1
-    INTERVAL = 60
+    interval = 60
     base_time = time.time()
     _generate_performance_data(
         test_repository,
@@ -277,23 +277,23 @@ def test_custom_alert_threshold(
         base_time,
         1,
         0.5,
-        int(INTERVAL / 3),
+        int(interval / 3),
     )
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
-        int(INTERVAL / 3) + 1,
+        int(interval / 3) + 1,
         0.6,
-        int(INTERVAL / 3),
+        int(interval / 3),
     )
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
-        2 * int(INTERVAL / 3) + 1,
+        2 * int(interval / 3) + 1,
         2.0,
-        int(INTERVAL / 3),
+        int(interval / 3),
     )

     generate_new_alerts_in_series(test_perf_signature)
@@ -319,22 +319,22 @@ def test_alert_change_type_absolute(
     test_perf_signature.save()

     base_time = time.time()  # generate it based off current time
-    INTERVAL = 30
+    interval = 30
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
         1,
         0.5,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )
     _generate_performance_data(
         test_repository,
         test_perf_signature,
         base_time,
-        int(INTERVAL / 2) + 1,
+        int(interval / 2) + 1,
         new_value,
-        int(INTERVAL / 2),
+        int(interval / 2),
     )

     generate_new_alerts_in_series(test_perf_signature)
@@ -123,10 +123,10 @@ def test_detect_changes_few_revisions_many_values():
 def test_detect_changes_historical_data(filename, expected_timestamps):
     """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
     # Configuration for Analyzer
-    FORE_WINDOW = 12
-    MIN_BACK_WINDOW = 12
-    MAX_BACK_WINDOW = 24
-    THRESHOLD = 7
+    fore_window = 12
+    min_back_window = 12
+    max_back_window = 24
+    threshold = 7

     payload = SampleData.get_perf_data(os.path.join("graphs", filename))
     runs = payload["test_runs"]
@@ -134,10 +134,10 @@ def test_detect_changes_historical_data(filename, expected_timestamps):

     results = detect_changes(
         data,
-        min_back_window=MIN_BACK_WINDOW,
-        max_back_window=MAX_BACK_WINDOW,
-        fore_window=FORE_WINDOW,
-        t_threshold=THRESHOLD,
+        min_back_window=min_back_window,
+        max_back_window=max_back_window,
+        fore_window=fore_window,
+        t_threshold=threshold,
     )
     regression_timestamps = [d.push_timestamp for d in results if d.change_detected]
     assert regression_timestamps == expected_timestamps
@@ -18,37 +18,37 @@ def test_bugzilla_components_for_path(client, test_job):
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    URL_BASE = reverse("bugzilla-component-list")
+    url_base = reverse("bugzilla-component-list")

-    EXPECTED_MOCK1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]
+    expected_mock1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]

-    resp = client.get(URL_BASE + "?path=file_1.extension")
+    resp = client.get(url_base + "?path=file_1.extension")
     assert resp.status_code == 200
-    assert resp.json() == EXPECTED_MOCK1
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=file_2.extension")
+    resp = client.get(url_base + "?path=file_2.extension")
     assert resp.json() == []

-    resp = client.get(URL_BASE + "?path=ile_2.extension")
+    resp = client.get(url_base + "?path=ile_2.extension")
     assert resp.json() == []

-    resp = client.get(URL_BASE + "?path=file_1")
-    assert resp.json() == EXPECTED_MOCK1
+    resp = client.get(url_base + "?path=file_1")
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
-    assert resp.json() == EXPECTED_MOCK1
+    resp = client.get(url_base + "?path=mock/folder/file_1.extension")
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
+    resp = client.get(url_base + "?path=other_mock/other_folder/file_1.extension")
     # Should also pass because search falls back to file name if no match for path.
-    assert resp.json() == EXPECTED_MOCK1
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=folder/file_1.extension")
-    assert resp.json() == EXPECTED_MOCK1
+    resp = client.get(url_base + "?path=folder/file_1.extension")
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=folder/file_1.other_extension")
-    assert resp.json() == EXPECTED_MOCK1
+    resp = client.get(url_base + "?path=folder/file_1.other_extension")
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=completely.unrelated")
+    resp = client.get(url_base + "?path=completely.unrelated")
     assert resp.json() == []

     BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 2")
@@ -59,25 +59,25 @@ def test_bugzilla_components_for_path(client, test_job):
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]
+    expected_mock2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]

-    EXPECTED_MOCK1_MOCK2 = [
+    expected_mock1_mock2 = [
         {"product": "Mock Product 1", "component": "Mock Component 1"},
         {"product": "Mock Product 1", "component": "Mock Component 2"},
     ]

-    resp = client.get(URL_BASE + "?path=file_1.extension")
-    assert resp.json() == EXPECTED_MOCK1_MOCK2
+    resp = client.get(url_base + "?path=file_1.extension")
+    assert resp.json() == expected_mock1_mock2

-    resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
-    assert resp.json() == EXPECTED_MOCK1
+    resp = client.get(url_base + "?path=mock/folder/file_1.extension")
+    assert resp.json() == expected_mock1

-    resp = client.get(URL_BASE + "?path=mock/folder_2/file_1.extension")
-    assert resp.json() == EXPECTED_MOCK2
+    resp = client.get(url_base + "?path=mock/folder_2/file_1.extension")
+    assert resp.json() == expected_mock2

-    resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
+    resp = client.get(url_base + "?path=other_mock/other_folder/file_1.extension")
     # Should also pass because search falls back to file name if no match for path.
-    assert resp.json() == EXPECTED_MOCK1_MOCK2
+    assert resp.json() == expected_mock1_mock2

     BugzillaComponent.objects.create(product="Mock Product 3", component="Mock Component 3")

@@ -87,16 +87,16 @@ def test_bugzilla_components_for_path(client, test_job):
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]
+    expected_mock3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]

-    resp = client.get(URL_BASE + "?path=other.file.js")
-    assert resp.json() == EXPECTED_MOCK3
+    resp = client.get(url_base + "?path=other.file.js")
+    assert resp.json() == expected_mock3

-    resp = client.get(URL_BASE + "?path=other.file")
-    assert resp.json() == EXPECTED_MOCK3
+    resp = client.get(url_base + "?path=other.file")
+    assert resp.json() == expected_mock3

-    resp = client.get(URL_BASE + "?path=other")
-    assert resp.json() == EXPECTED_MOCK3
+    resp = client.get(url_base + "?path=other")
+    assert resp.json() == expected_mock3

     BugzillaComponent.objects.create(product="Mock Product 4", component="Mock Component 4")

@@ -106,23 +106,23 @@ def test_bugzilla_components_for_path(client, test_job):
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]
+    expected_mock4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]

-    EXPECTED_MOCK3_MOCK4 = [
+    expected_mock3_mock4 = [
         {"product": "Mock Product 3", "component": "Mock Component 3"},
         {"product": "Mock Product 4", "component": "Mock Component 4"},
     ]

-    resp = client.get(URL_BASE + "?path=other.file.js")
-    assert resp.json() == EXPECTED_MOCK3
+    resp = client.get(url_base + "?path=other.file.js")
+    assert resp.json() == expected_mock3

-    resp = client.get(URL_BASE + "?path=other.extension")
-    assert resp.json() == EXPECTED_MOCK4
+    resp = client.get(url_base + "?path=other.extension")
+    assert resp.json() == expected_mock4

-    resp = client.get(URL_BASE + "?path=other")
-    assert resp.json() == EXPECTED_MOCK3_MOCK4
+    resp = client.get(url_base + "?path=other")
+    assert resp.json() == expected_mock3_mock4

-    resp = client.get(URL_BASE + "?path=another")
+    resp = client.get(url_base + "?path=another")
     assert resp.json() == []

     BugzillaComponent.objects.create(
@@ -166,19 +166,19 @@ def test_bugzilla_components_for_path(client, test_job):
         bugzilla_component=BugzillaComponent.objects.last(),
     )

-    EXPECTED_MOCK_ORG_MOZILLA = [
+    expected_mock_org_mozilla = [
         {
             "product": "Mock Product org.mozilla.*.<TestName>",
             "component": "Mock Component File Match",
         }
     ]

-    resp = client.get(URL_BASE + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
-    assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
+    resp = client.get(url_base + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
+    assert resp.json() == expected_mock_org_mozilla

     # Only take test name into account.
-    resp = client.get(URL_BASE + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
-    assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
+    resp = client.get(url_base + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
+    assert resp.json() == expected_mock_org_mozilla

     BugzillaComponent.objects.create(product="Testing", component="Mochitest")

@@ -189,5 +189,5 @@ def test_bugzilla_components_for_path(client, test_job):
     )

     # Respect the ignore list of product and component combinations.
-    resp = client.get(URL_BASE + "?path=mock/mochitest/mochitest.test")
+    resp = client.get(url_base + "?path=mock/mochitest/mochitest.test")
     assert resp.json() == []
@@ -27,9 +27,9 @@ def reopen_intermittent_bugs():
     )
     # Intermittent bugs get closed after 3 weeks of inactivity if other conditions don't apply:
     # https://github.com/mozilla/relman-auto-nag/blob/c7439e247677333c1cd8c435234b3ef3adc49680/auto_nag/scripts/close_intermittents.py#L17
-    RECENT_DAYS = 7
+    recent_days = 7
     recently_used_bugs = set(
-        BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(RECENT_DAYS)).values_list(
+        BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(recent_days)).values_list(
             "bug_id", flat=True
         )
     )
@@ -118,8 +118,8 @@ async def ingest_task(taskId, root_url):
     # Remove default timeout limit of 5 minutes
     timeout = aiohttp.ClientTimeout(total=0)
    async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
-        asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
-        results = await asyncio.gather(asyncQueue.status(taskId), asyncQueue.task(taskId))
+        async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+        results = await asyncio.gather(async_queue.status(taskId), async_queue.task(taskId))
         await handleTask(
             {
                 "status": results[0]["status"],
@@ -130,7 +130,7 @@ async def ingest_task(taskId, root_url):


 async def handleTask(task, root_url):
-    taskId = task["status"]["taskId"]
+    task_id = task["status"]["taskId"]
     runs = task["status"]["runs"]
     # If we iterate in order of the runs, we will not be able to mark older runs as
     # "retry" instead of exception
@@ -139,7 +139,7 @@ async def handleTask(task, root_url):
             "exchange": stateToExchange[run["state"]],
             "payload": {
                 "status": {
-                    "taskId": taskId,
+                    "taskId": task_id,
                     "runs": runs,
                 },
                 "runId": run["runId"],
@@ -148,35 +148,35 @@ async def handleTask(task, root_url):
         }

         try:
-            taskRuns = await handleMessage(message, task["task"])
+            task_runs = await handleMessage(message, task["task"])
         except Exception as e:
             logger.exception(e)

-        if taskRuns:
+        if task_runs:
             # Schedule and run jobs inside the thread pool executor
-            jobFutures = [
-                routine_to_future(process_job_with_threads, run, root_url) for run in taskRuns
+            job_futures = [
+                routine_to_future(process_job_with_threads, run, root_url) for run in task_runs
             ]
-            await await_futures(jobFutures)
+            await await_futures(job_futures)


 async def fetchGroupTasks(taskGroupId, root_url):
     tasks = []
     query = {}
-    continuationToken = ""
+    continuation_token = ""
     # Limiting the connection pool just in case we have too many
     conn = aiohttp.TCPConnector(limit=10)
     # Remove default timeout limit of 5 minutes
     timeout = aiohttp.ClientTimeout(total=0)
     async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
-        asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+        async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
         while True:
-            if continuationToken:
-                query = {"continuationToken": continuationToken}
-            response = await asyncQueue.listTaskGroup(taskGroupId, query=query)
+            if continuation_token:
+                query = {"continuationToken": continuation_token}
+            response = await async_queue.listTaskGroup(taskGroupId, query=query)
             tasks.extend(response["tasks"])
-            continuationToken = response.get("continuationToken")
-            if continuationToken is None:
+            continuation_token = response.get("continuationToken")
+            if continuation_token is None:
                 break
             logger.info("Requesting more tasks. %s tasks so far...", len(tasks))
     return tasks
@@ -193,8 +193,8 @@ async def processTasks(taskGroupId, root_url):
         return

     # Schedule and run tasks inside the thread pool executor
-    taskFutures = [routine_to_future(handleTask, task, root_url) for task in tasks]
-    await await_futures(taskFutures)
+    task_futures = [routine_to_future(handleTask, task, root_url) for task in tasks]
+    await await_futures(task_futures)


 async def routine_to_future(func, *args):
@@ -249,12 +249,12 @@ def get_decision_task_id(project, revision, root_url):
 def repo_meta(project):
     _repo = Repository.objects.filter(name=project)[0]
     assert _repo, f"The project {project} you specified is incorrect"
-    splitUrl = _repo.url.split("/")
+    split_url = _repo.url.split("/")
     return {
         "url": _repo.url,
         "branch": _repo.branch,
-        "owner": splitUrl[3],
-        "repo": splitUrl[4],
+        "owner": split_url[3],
+        "repo": split_url[4],
         "tc_root_url": _repo.tc_root_url,
     }

@@ -270,16 +270,16 @@ def query_data(repo_meta, commit):
     event_base_sha = repo_meta["branch"]
     # First we try with `master` being the base sha
     # e.g. https://api.github.com/repos/servo/servo/compare/master...1418c0555ff77e5a3d6cf0c6020ba92ece36be2e
-    compareResponse = github.compare_shas(
+    compare_response = github.compare_shas(
         repo_meta["owner"], repo_meta["repo"], repo_meta["branch"], commit
     )
-    merge_base_commit = compareResponse.get("merge_base_commit")
+    merge_base_commit = compare_response.get("merge_base_commit")
     if merge_base_commit:
         commiter_date = merge_base_commit["commit"]["committer"]["date"]
         # Since we don't use PushEvents that contain the "before" or "event.base.sha" fields [1]
         # we need to discover the right parent which existed in the base branch.
         # [1] https://github.com/taskcluster/taskcluster/blob/3dda0adf85619d18c5dcf255259f3e274d2be346/services/github/src/api.js#L55
-        parents = compareResponse["merge_base_commit"]["parents"]
+        parents = compare_response["merge_base_commit"]["parents"]
         if len(parents) == 1:
             parent = parents[0]
             commit_info = fetch_json(parent["url"])
@@ -301,12 +301,12 @@ def query_data(repo_meta, commit):
     assert event_base_sha != repo_meta["branch"]
     logger.info("We have a new base: %s", event_base_sha)
     # When using the correct event_base_sha the "commits" field will be correct
-    compareResponse = github.compare_shas(
+    compare_response = github.compare_shas(
         repo_meta["owner"], repo_meta["repo"], event_base_sha, commit
     )

     commits = []
-    for _commit in compareResponse["commits"]:
+    for _commit in compare_response["commits"]:
         commits.append(
             {
                 "message": _commit["commit"]["message"],
@@ -453,7 +453,7 @@ class Command(BaseCommand):

     def handle(self, *args, **options):
         loop = asyncio.get_event_loop()
-        typeOfIngestion = options["ingestion_type"][0]
+        type_of_ingestion = options["ingestion_type"][0]
         root_url = options["root_url"]

         if not options["enable_eager_celery"]:
@@ -462,22 +462,22 @@ class Command(BaseCommand):
             # Make sure all tasks are run synchronously / immediately
             settings.CELERY_TASK_ALWAYS_EAGER = True

-        if typeOfIngestion == "task":
+        if type_of_ingestion == "task":
             assert options["taskId"]
             loop.run_until_complete(ingest_task(options["taskId"], root_url))
-        elif typeOfIngestion == "prUrl":
+        elif type_of_ingestion == "prUrl":
             assert options["prUrl"]
             ingest_pr(options["prUrl"], root_url)
-        elif typeOfIngestion.find("git") > -1:
+        elif type_of_ingestion.find("git") > -1:
             if not os.environ.get("GITHUB_TOKEN"):
                 logger.warning(
                     "If you don't set up GITHUB_TOKEN you might hit Github's rate limiting. See docs for info."
                 )
-            if typeOfIngestion == "git-push":
+            if type_of_ingestion == "git-push":
                 ingest_push(options["project"], options["commit"])
-            elif typeOfIngestion == "git-pushes":
+            elif type_of_ingestion == "git-pushes":
                 ingest_git_pushes(options["project"], options["dryRun"])
-        elif typeOfIngestion == "push":
+        elif type_of_ingestion == "push":
             ingest_hg_push(options)
         else:
             raise Exception("Please check the code for valid ingestion types.")
@@ -38,17 +38,17 @@ def stateFromRun(jobRun):


 def resultFromRun(jobRun):
-    RUN_TO_RESULT = {
+    run_to_result = {
         "completed": "success",
         "failed": "fail",
     }
     state = jobRun["state"]
-    if state in list(RUN_TO_RESULT.keys()):
-        return RUN_TO_RESULT[state]
+    if state in list(run_to_result.keys()):
+        return run_to_result[state]
     elif state == "exception":
-        reasonResolved = jobRun.get("reasonResolved")
-        if reasonResolved in ["canceled", "superseded"]:
-            return reasonResolved
+        reason_resolved = jobRun.get("reasonResolved")
+        if reason_resolved in ["canceled", "superseded"]:
+            return reason_resolved
         return "exception"
     else:
         return "unknown"
@@ -57,12 +57,12 @@ def resultFromRun(jobRun):
 # Creates a log entry for Treeherder to retrieve and parse. This log is
 # displayed on the Treeherder Log Viewer once parsed.
 def createLogReference(root_url, taskId, runId):
-    logUrl = taskcluster_urls.api(
+    log_url = taskcluster_urls.api(
         root_url, "queue", "v1", "task/{taskId}/runs/{runId}/artifacts/public/logs/live_backing.log"
     ).format(taskId=taskId, runId=runId)
     return {
         "name": "live_backing_log",
-        "url": logUrl,
+        "url": log_url,
     }


@@ -71,27 +71,27 @@ def createLogReference(root_url, taskId, runId):
 # Treeherder job message.
 # TODO: Refactor https://bugzilla.mozilla.org/show_bug.cgi?id=1560596
 def parseRouteInfo(prefix, taskId, routes, task):
-    matchingRoutes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
+    matching_routes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))

-    if len(matchingRoutes) != 1:
+    if len(matching_routes) != 1:
         raise PulseHandlerError(
             "Could not determine Treeherder route. Either there is no route, "
             + "or more than one matching route exists."
             + f"Task ID: {taskId} Routes: {routes}"
         )

-    parsedRoute = parseRoute(matchingRoutes[0])
+    parsed_route = parseRoute(matching_routes[0])

-    return parsedRoute
+    return parsed_route


 def validateTask(task):
-    treeherderMetadata = task.get("extra", {}).get("treeherder")
-    if not treeherderMetadata:
+    treeherder_metadata = task.get("extra", {}).get("treeherder")
+    if not treeherder_metadata:
         logger.debug("Task metadata is missing Treeherder job configuration.")
         return False
     try:
-        jsonschema.validate(treeherderMetadata, get_json_schema("task-treeherder-config.yml"))
+        jsonschema.validate(treeherder_metadata, get_json_schema("task-treeherder-config.yml"))
     except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
         logger.error("JSON Schema validation error during Taskcluser message ingestion: %s", e)
         return False
@@ -169,26 +169,26 @@ def ignore_task(task, taskId, rootUrl, project):
 async def handleMessage(message, taskDefinition=None):
     async with taskcluster.aio.createSession() as session:
         jobs = []
-        taskId = message["payload"]["status"]["taskId"]
-        asyncQueue = taskcluster.aio.Queue({"rootUrl": message["root_url"]}, session=session)
-        task = (await asyncQueue.task(taskId)) if not taskDefinition else taskDefinition
+        task_id = message["payload"]["status"]["taskId"]
+        async_queue = taskcluster.aio.Queue({"rootUrl": message["root_url"]}, session=session)
+        task = (await async_queue.task(task_id)) if not taskDefinition else taskDefinition

         try:
-            parsedRoute = parseRouteInfo("tc-treeherder", taskId, task["routes"], task)
+            parsed_route = parseRouteInfo("tc-treeherder", task_id, task["routes"], task)
         except PulseHandlerError as e:
             logger.debug("%s", str(e))
             return jobs

-        if ignore_task(task, taskId, message["root_url"], parsedRoute["project"]):
+        if ignore_task(task, task_id, message["root_url"], parsed_route["project"]):
             return jobs

-        logger.debug("Message received for task %s", taskId)
+        logger.debug("Message received for task %s", task_id)

         # Validation failures are common and logged, so do nothing more.
         if not validateTask(task):
             return jobs

-        taskType = EXCHANGE_EVENT_MAP.get(message["exchange"])
+        task_type = EXCHANGE_EVENT_MAP.get(message["exchange"])

         # Originally this code was only within the "pending" case, however, in order to support
         # ingesting all tasks at once which might not have "pending" case
@@ -196,18 +196,18 @@ async def handleMessage(message, taskDefinition=None):
         # This will only work if the previous run has not yet been processed by Treeherder
         # since _remove_existing_jobs() will prevent it
         if message["payload"]["runId"] > 0:
-            jobs.append(await handleTaskRerun(parsedRoute, task, message, session))
+            jobs.append(await handleTaskRerun(parsed_route, task, message, session))

-        if not taskType:
+        if not task_type:
             raise Exception("Unknown exchange: {exchange}".format(exchange=message["exchange"]))
-        elif taskType == "pending":
-            jobs.append(handleTaskPending(parsedRoute, task, message))
-        elif taskType == "running":
-            jobs.append(handleTaskRunning(parsedRoute, task, message))
-        elif taskType in ("completed", "failed"):
-            jobs.append(await handleTaskCompleted(parsedRoute, task, message, session))
-        elif taskType == "exception":
-            jobs.append(await handleTaskException(parsedRoute, task, message, session))
+        elif task_type == "pending":
+            jobs.append(handleTaskPending(parsed_route, task, message))
+        elif task_type == "running":
+            jobs.append(handleTaskRunning(parsed_route, task, message))
+        elif task_type in ("completed", "failed"):
+            jobs.append(await handleTaskCompleted(parsed_route, task, message, session))
+        elif task_type == "exception":
+            jobs.append(await handleTaskException(parsed_route, task, message, session))

         return jobs

@@ -218,30 +218,30 @@ async def handleMessage(message, taskDefinition=None):
 # Specific handlers for each message type will add/remove information necessary
 # for the type of task event..
 def buildMessage(pushInfo, task, runId, payload):
-    taskId = payload["status"]["taskId"]
-    jobRun = payload["status"]["runs"][runId]
-    treeherderConfig = task["extra"]["treeherder"]
+    task_id = payload["status"]["taskId"]
+    job_run = payload["status"]["runs"][runId]
+    treeherder_config = task["extra"]["treeherder"]

     job = {
         "buildSystem": "taskcluster",
         "owner": task["metadata"]["owner"],
-        "taskId": f"{slugid.decode(taskId)}/{runId}",
+        "taskId": f"{slugid.decode(task_id)}/{runId}",
         "retryId": runId,
         "isRetried": False,
         "display": {
             # jobSymbols could be an integer (i.e. Chunk ID) but need to be strings
             # for treeherder
-            "jobSymbol": str(treeherderConfig["symbol"]),
-            "groupSymbol": treeherderConfig.get("groupSymbol", "?"),
+            "jobSymbol": str(treeherder_config["symbol"]),
+            "groupSymbol": treeherder_config.get("groupSymbol", "?"),
             # Maximum job name length is 140 chars...
             "jobName": task["metadata"]["name"][0:139],
         },
-        "state": stateFromRun(jobRun),
-        "result": resultFromRun(jobRun),
-        "tier": treeherderConfig.get("tier", 1),
+        "state": stateFromRun(job_run),
+        "result": resultFromRun(job_run),
+        "tier": treeherder_config.get("tier", 1),
         "timeScheduled": task["created"],
-        "jobKind": treeherderConfig.get("jobKind", "other"),
-        "reason": treeherderConfig.get("reason", "scheduled"),
+        "jobKind": treeherder_config.get("jobKind", "other"),
+        "reason": treeherder_config.get("reason", "scheduled"),
         "jobInfo": {
             "links": [],
             "summary": task["metadata"]["description"],
@@ -263,28 +263,28 @@ def buildMessage(pushInfo, task, runId, payload):

     # Transform "collection" into an array of labels if task doesn't
     # define "labels".
-    labels = treeherderConfig.get("labels", [])
+    labels = treeherder_config.get("labels", [])
     if not labels:
-        if not treeherderConfig.get("collection"):
+        if not treeherder_config.get("collection"):
             labels = ["opt"]
         else:
-            labels = list(treeherderConfig["collection"].keys())
+            labels = list(treeherder_config["collection"].keys())

     job["labels"] = labels

-    machine = treeherderConfig.get("machine", {})
+    machine = treeherder_config.get("machine", {})
     job["buildMachine"] = {
-        "name": jobRun.get("workerId", "unknown"),
+        "name": job_run.get("workerId", "unknown"),
         "platform": machine.get("platform", task["workerType"]),
         "os": machine.get("os", "-"),
         "architecture": machine.get("architecture", "-"),
     }

-    if treeherderConfig.get("productName"):
-        job["productName"] = treeherderConfig["productName"]
+    if treeherder_config.get("productName"):
+        job["productName"] = treeherder_config["productName"]

-    if treeherderConfig.get("groupName"):
-        job["display"]["groupName"] = treeherderConfig["groupName"]
+    if treeherder_config.get("groupName"):
+        job["display"]["groupName"] = treeherder_config["groupName"]

     return job

@@ -318,13 +318,13 @@ def handleTaskRunning(pushInfo, task, message):

 async def handleTaskCompleted(pushInfo, task, message, session):
     payload = message["payload"]
-    jobRun = payload["status"]["runs"][payload["runId"]]
+    job_run = payload["status"]["runs"][payload["runId"]]
     job = buildMessage(pushInfo, task, payload["runId"], payload)

-    job["timeStarted"] = jobRun["started"]
-    job["timeCompleted"] = jobRun["resolved"]
+    job["timeStarted"] = job_run["started"]
+    job["timeCompleted"] = job_run["resolved"]
     job["logs"] = [
-        createLogReference(message["root_url"], payload["status"]["taskId"], jobRun["runId"]),
+        createLogReference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
     ]
     job = await addArtifactUploadedLinks(
         message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
@@ -334,17 +334,17 @@ async def handleTaskCompleted(pushInfo, task, message, session):

 async def handleTaskException(pushInfo, task, message, session):
     payload = message["payload"]
-    jobRun = payload["status"]["runs"][payload["runId"]]
+    job_run = payload["status"]["runs"][payload["runId"]]
     # Do not report runs that were created as an exception. Such cases
     # are deadline-exceeded
-    if jobRun["reasonCreated"] == "exception":
+    if job_run["reasonCreated"] == "exception":
         return

     job = buildMessage(pushInfo, task, payload["runId"], payload)
     # Jobs that get cancelled before running don't have a started time
-    if jobRun.get("started"):
-        job["timeStarted"] = jobRun["started"]
-    job["timeCompleted"] = jobRun["resolved"]
+    if job_run.get("started"):
+        job["timeStarted"] = job_run["started"]
+    job["timeCompleted"] = job_run["resolved"]
     # exceptions generally have no logs, so in the interest of not linking to a 404'ing artifact,
     # don't include a link
     job["logs"] = []
@@ -355,21 +355,21 @@ async def handleTaskException(pushInfo, task, message, session):


 async def fetchArtifacts(root_url, taskId, runId, session):
-    asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
-    res = await asyncQueue.listArtifacts(taskId, runId)
+    async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+    res = await async_queue.listArtifacts(taskId, runId)
     artifacts = res["artifacts"]

-    continuationToken = res.get("continuationToken")
-    while continuationToken is not None:
+    continuation_token = res.get("continuationToken")
+    while continuation_token is not None:
         continuation = {"continuationToken": res["continuationToken"]}

         try:
-            res = await asyncQueue.listArtifacts(taskId, runId, continuation)
+            res = await async_queue.listArtifacts(taskId, runId, continuation)
         except Exception:
             break

         artifacts = artifacts.concat(res["artifacts"])
-        continuationToken = res.get("continuationToken")
+        continuation_token = res.get("continuationToken")

     return artifacts

@@ -14,28 +14,28 @@
 def parseRoute(route):
     id = None
     owner = None
-    parsedProject = None
-    parsedRoute = route.split(".")
-    project = parsedRoute[2]
+    parsed_project = None
+    parsed_route = route.split(".")
+    project = parsed_route[2]
     if len(project.split("/")) == 2:
-        [owner, parsedProject] = project.split("/")
+        [owner, parsed_project] = project.split("/")
     else:
-        parsedProject = project
+        parsed_project = project

-    if len(parsedRoute) == 5:
-        id = parsedRoute[4]
+    if len(parsed_route) == 5:
+        id = parsed_route[4]

-    pushInfo = {
-        "destination": parsedRoute[0],
+    push_info = {
+        "destination": parsed_route[0],
         "id": int(id) if id else 0,
-        "project": parsedProject,
-        "revision": parsedRoute[3],
+        "project": parsed_project,
+        "revision": parsed_route[3],
     }

-    if owner and parsedProject:
-        pushInfo["owner"] = owner
-        pushInfo["origin"] = "github.com"
+    if owner and parsed_project:
+        push_info["owner"] = owner
+        push_info["origin"] = "github.com"
     else:
-        pushInfo["origin"] = "hg.mozilla.org"
+        push_info["origin"] = "hg.mozilla.org"

-    return pushInfo
+    return push_info
@@ -37,8 +37,8 @@ def add_headers_function(headers, path, url):
     if report_uri not in CSP_DIRECTIVES:
         CSP_DIRECTIVES.append(report_uri)

-    CSP_HEADER = "; ".join(CSP_DIRECTIVES)
-    headers["Content-Security-Policy"] = CSP_HEADER
+    csp_header = "; ".join(CSP_DIRECTIVES)
+    headers["Content-Security-Policy"] = csp_header


 class CustomWhiteNoise(WhiteNoiseMiddleware):
@@ -48,8 +48,8 @@ def get_error_summary(job, queryset=None):
     dates = list(line_cache.keys())
     dates.sort()
     for d in dates:
-        dTime = datetime.datetime.strptime(d, "%Y-%m-%d")
-        if dTime <= (job.submit_time - datetime.timedelta(days=LINE_CACHE_TIMEOUT_DAYS)):
+        date_time = datetime.datetime.strptime(d, "%Y-%m-%d")
+        if date_time <= (job.submit_time - datetime.timedelta(days=LINE_CACHE_TIMEOUT_DAYS)):
             del line_cache[d]
         else:
             break
@@ -28,7 +28,7 @@ Type 'yes' to continue, or 'no' to cancel: """
     # verbose, so let's do that programmatically
     s = PerformanceSignature.objects.get(id=1)
     PerformanceDatum.objects.filter(signature=s).delete()
-    INTERVAL = 30
+    interval = 30
     now = time.time()

     # create a push first as need a push_id
@@ -40,8 +40,8 @@ Type 'yes' to continue, or 'no' to cancel: """
     )

     for t, v in zip(
-        [i for i in range(INTERVAL)],
-        ([0.5 for i in range(int(INTERVAL / 2))] + [1.0 for i in range(int(INTERVAL / 2))]),
+        [i for i in range(interval)],
+        ([0.5 for i in range(int(interval / 2))] + [1.0 for i in range(int(interval / 2))]),
     ):
         PerformanceDatum.objects.create(
             repository=s.repository,
@@ -10,14 +10,14 @@ def get_commit_history(repository, revision, push):
     from mozci.push import Push as MozciPush
     from mozci.errors import ParentPushNotFound

-    mozciPush = MozciPush([revision], repository.name)
+    mozci_push = MozciPush([revision], repository.name)
     parent = None
     parent_sha = None
     parent_repo = None
     parent_push = None

     try:
-        parent = mozciPush.parent
+        parent = mozci_push.parent
     except ParentPushNotFound:
         pass

@@ -73,7 +73,7 @@ def get_history(
 # For each failure item in ``tests``, we group all jobs of the exact same type into
 # a field called `jobs`. So it has passed and failed jobs in there.
 #
-def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
+def get_current_test_failures(push, option_map, jobs, investigated_tests=None):
     # Using .distinct(<fields>) here would help by removing duplicate FailureLines
     # for the same job (with different sub-tests), but it's only supported by
     # postgres. Just using .distinct() has no effect.
@@ -107,19 +107,19 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
             all_failed_jobs[job.id] = job
             # The 't' ensures the key starts with a character, as required for a query selector
             test_key = re.sub(r"\W+", "", f"t{test_name}{config}{platform}{job_name}{job_group}")
-            isClassifiedIntermittent = any(
+            is_classified_intermittent = any(
                 job["failure_classification_id"] == 4 for job in jobs[job_name]
             )

-            isInvestigated = False
-            investigatedTestId = None
-            for investigatedTest in investigatedTests:
+            is_investigated = False
+            investigated_test_id = None
+            for investigated_test in investigated_tests:
                 if (
-                    investigatedTest.test == test_name
-                    and job.job_type.id == investigatedTest.job_type.id
+                    investigated_test.test == test_name
+                    and job.job_type.id == investigated_test.job_type.id
                 ):
-                    isInvestigated = True
-                    investigatedTestId = investigatedTest.id
+                    is_investigated = True
+                    investigated_test_id = investigated_test.id
                     break

             if test_key not in tests:
@@ -140,16 +140,16 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
                 "totalFailures": 0,
                 "totalJobs": 0,
                 "failedInParent": False,
-                "isClassifiedIntermittent": isClassifiedIntermittent,
-                "isInvestigated": isInvestigated,
-                "investigatedTestId": investigatedTestId,
+                "isClassifiedIntermittent": is_classified_intermittent,
+                "isInvestigated": is_investigated,
+                "investigatedTestId": investigated_test_id,
             }
             tests[test_key] = line
-            countJobs = len(
+            count_jobs = len(
                 list(filter(lambda x: x["result"] in ["success", "testfailed"], jobs[job_name]))
             )
             tests[test_key]["totalFailures"] += 1
-            tests[test_key]["totalJobs"] = countJobs
+            tests[test_key]["totalJobs"] = count_jobs

     # Each line of the sorted list that is returned here represents one test file per platform/
     # config. Each line will have at least one failing job, but may have several
@@ -232,13 +232,13 @@ def get_test_failures(
     fixed_by_commit_history = get_history(
         2, push_date, fixed_by_commit_history_days, option_map, repository_ids
     )
-    investigatedTests = InvestigatedTests.objects.filter(push=push)
+    investigated_tests = InvestigatedTests.objects.filter(push=push)

     # ``push_failures`` are tests that have FailureLine records created by our Log Parser.
     # These are tests we are able to show to examine to see if we can determine they are
     # intermittent. If they are not, we tell the user they need investigation.
     # These are failures ONLY for the current push, not relative to history.
-    push_failures = get_current_test_failures(push, option_map, jobs, investigatedTests)
+    push_failures = get_current_test_failures(push, option_map, jobs, investigated_tests)
     filtered_push_failures = [failure for failure in push_failures if filter_failure(failure)]

     # Based on the intermittent and FixedByCommit history, set the appropriate classification
@@ -19,11 +19,11 @@ class FilesBugzillaMapViewSet(viewsets.ReadOnlyModelViewSet):
         # combinations can be in the failure line, it might not be a test and
         # the real issue gets logged earlier but not detected as failure line.
         # Require user input for the product and component to use.
-        IGNORE_LIST_PRODUCT_COMPONENT = [
+        ignore_list_product_component = [
            {product: "Testing", component: "Mochitest"},
        ]
        for product_component in queryset:
-            if product_component not in IGNORE_LIST_PRODUCT_COMPONENT:
+            if product_component not in ignore_list_product_component:
                filtered_queryset.append(product_component)
        return filtered_queryset[:5]

@@ -40,8 +40,8 @@ class FilesBugzillaMapViewSet(viewsets.ReadOnlyModelViewSet):
        # Drop parameters
        path = (path.split("?"))[0]
        file = (path.split("/"))[-1]
-        fileNameParts = file.split(".")
-        file_without_extension = fileNameParts[0] + ("." if len(fileNameParts) > 1 else "")
+        file_name_parts = file.split(".")
+        file_without_extension = file_name_parts[0] + ("." if len(file_name_parts) > 1 else "")
        queryset = (
            FilesBugzillaMap.objects.select_related("bugzilla_component")
            .filter(path__endswith=path)
@@ -35,13 +35,13 @@ class InvestigatedViewSet(viewsets.ModelViewSet):
         project = kwargs["project"]
         revision = request.query_params.get("revision")
         test = request.data["test"]
-        jobName = request.data["jobName"]
-        jobSymbol = request.data["jobSymbol"]
+        job_name = request.data["jobName"]
+        job_symbol = request.data["jobSymbol"]

         try:
             repository = Repository.objects.get(name=project)
             push = Push.objects.get(revision=revision, repository=repository)
-            job_type = JobType.objects.get(name=jobName, symbol=jobSymbol)
+            job_type = JobType.objects.get(name=job_name, symbol=job_symbol)
             serializer = self.get_serializer(data=request.data)
             serializer.is_valid(raise_exception=True)
             serializer.save(push=push, job_type=job_type, test=test)
@@ -54,7 +54,7 @@ class InvestigatedViewSet(viewsets.ModelViewSet):
             return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)

         except JobType.DoesNotExist:
-            return Response(f"No JobType with job name: {jobName}", status=HTTP_404_NOT_FOUND)
+            return Response(f"No JobType with job name: {job_name}", status=HTTP_404_NOT_FOUND)

     def destroy(self, request, project, pk=None):
         try:
@@ -313,7 +313,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
         - count (10)
         - return_type (dict)
         """
-        MAX_JOBS_COUNT = 2000
+        max_jobs_count = 2000

         filter_params = {}

@@ -348,8 +348,8 @@ class JobsProjectViewSet(viewsets.ViewSet):
             return Response("Invalid value for offset or count", status=HTTP_400_BAD_REQUEST)
         return_type = filter_params.get("return_type", "dict").lower()

-        if count > MAX_JOBS_COUNT:
-            msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {MAX_JOBS_COUNT}"
+        if count > max_jobs_count:
+            msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {max_jobs_count}"
             return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)

         try:
@@ -35,7 +35,7 @@ class PushViewSet(viewsets.ViewSet):
         GET method for list of ``push`` records with revisions
         """
         # What is the upper limit on the number of pushes returned by the api
-        MAX_PUSH_COUNT = 1000
+        max_push_count = 1000

         # make a mutable copy of these params
         filter_params = request.query_params.copy()
@@ -167,8 +167,8 @@ class PushViewSet(viewsets.ViewSet):
         except ValueError:
             return Response({"detail": "Valid count value required"}, status=HTTP_400_BAD_REQUEST)

-        if count > MAX_PUSH_COUNT:
-            msg = f"Specified count exceeds api limit: {MAX_PUSH_COUNT}"
+        if count > max_push_count:
+            msg = f"Specified count exceeds api limit: {max_push_count}"
             return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)

         # we used to have a "full" parameter for this endpoint so you could