Bug 1823654 - Introduce pyupgrade (#7904)

* Ruff Auto fix

* Ruff unsafe fixes auto fix

* Use builtin list instead of typing.List

---------

Co-authored-by: Sebastian Hengst <aryx.github@gmx-topmail.de>
Yoann Schneider 2024-02-02 21:02:10 +01:00 committed by GitHub
Parent a0e709276e
Commit 8028121253
No key found matching this signature
GPG key ID: B5690EEEBB952194
103 changed files with 320 additions and 429 deletions
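For orientation before the file-by-file diff, here is a minimal, hypothetical sketch (not taken from the diff itself) of the kinds of rewrites the pyupgrade rules perform once "UP" is enabled in Ruff's select list; the function, file name, and variables below are invented for illustration:

# Before the fix: str.format interpolation, typing.List, and an explicit "r" open mode.
# from typing import List
#
# def describe(job_id: int, names: List[str]) -> str:
#     with open("names.txt", "r") as f:
#         extra = f.read()
#     return "job {} has {} names".format(job_id, len(names)) + extra

# After running Ruff with the UP rules enabled: f-strings, builtin generic
# annotations, and open() without the redundant mode.
def describe(job_id: int, names: list[str]) -> str:
    with open("names.txt") as f:
        extra = f.read()
    return f"job {job_id} has {len(names)} names" + extra

Per the commit message, these rewrites were applied mechanically across the 103 changed files using Ruff's autofix (including its unsafe fixes) rather than by hand.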

View file

@ -27,7 +27,7 @@ def main(args):
# Support comma separated projects
projects = args.projects.split(",")
for _project in projects:
logger.info("Comparing {} against production.".format(_project))
logger.info(f"Comparing {_project} against production.")
# Remove properties that are irrelevant for the comparison
pushes = compare_to_client.get_pushes(_project, count=50)
for _push in sorted(pushes, key=lambda push: push["revision"]):

View file

@ -37,6 +37,8 @@ select = [
"W",
# pyflakes
"F",
# pyupgrade
"UP",
]
ignore = [

View file

@ -42,10 +42,10 @@ def create_failure_lines(job, failure_line_list, start_line=0):
job_log = JobLog.objects.create(
job=job,
name="{}{}".format(base_data.get("test"), job.id),
url="bar{}".format(i),
url=f"bar{i}",
status=1,
)
print("create jobLog for job id: {}".format(job.id))
print(f"create jobLog for job id: {job.id}")
failure_line.job_log = job_log
failure_line.save()
failure_lines.append(failure_line)

View file

@ -427,7 +427,7 @@ def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser)
del blob["sources"]
blob["revision"] = sample_push[push_index]["revision"]
blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXL{}".format(task_id_index)
blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcXL{task_id_index}"
blob["taskcluster_retry_id"] = "0"
blobs.append(blob)
@ -463,7 +463,7 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
del blob["sources"]
blob["revision"] = sample_push[push_index]["revision"]
blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcX{:0>2}".format(task_id_index)
blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcX{task_id_index:0>2}"
blob["taskcluster_retry_id"] = "0"
blob["job"]["revision"] = sample_push[push_index]["revision"]
blob["job"]["submit_timestamp"] = sample_push[push_index]["push_timestamp"]
@ -843,7 +843,7 @@ def mock_file_bugzilla_map_request(monkeypatch):
% project
)
files_bugzilla_data = None
file_name = "files_bugzilla_map_%s_%s.json" % (project, self.run_id)
file_name = f"files_bugzilla_map_{project}_{self.run_id}.json"
exception = None
try:
tests_folder = os.path.dirname(__file__)
@ -1117,7 +1117,7 @@ def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
bug_id = bugs[0].id
job_id = jobs[0].id
th_models.BugJobMap.create(job_id=job_id, bug_id=bug_id)
query_string = "?startday=2012-05-09&endday=2018-05-10&tree={}".format(test_repository.name)
query_string = f"?startday=2012-05-09&endday=2018-05-10&tree={test_repository.name}"
return {
"tree": test_repository.name,
@ -1270,7 +1270,7 @@ class JSONFixtureLoader:
def __call__(self, fixture_filename):
fixture_path = join(*self._prior_dirs, fixture_filename)
with open(fixture_path, "r") as f:
with open(fixture_path) as f:
return json.load(f)

View file

@ -1,4 +1,4 @@
from mock import MagicMock
from unittest.mock import MagicMock
from tests.test_utils import add_log_response
from treeherder.etl.jobs import store_job_data

View file

@ -5,7 +5,6 @@ import operator
import time
import pytest
from typing import List
from django.core.management import call_command
from django.db import IntegrityError
@ -87,7 +86,7 @@ def sample_perf_artifact() -> dict:
@pytest.fixture
def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
def sibling_perf_artifacts(sample_perf_artifact: dict) -> list[dict]:
"""intended to belong to the same job"""
artifacts = [copy.deepcopy(sample_perf_artifact) for _ in range(3)]

View file

@ -104,7 +104,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
pushes = pushlog_dict["pushes"]
max_push_id = max(int(k) for k in pushes.keys())
cache_key = "{}:last_push_id".format(test_repository.name)
cache_key = f"{test_repository.name}:last_push_id"
assert cache.get(cache_key) == max_push_id

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
from treeherder.etl.text import astral_filter, filter_re

View file

@ -37,7 +37,7 @@ def test_intermittents_commenter(bug_data):
comment_params = process.generate_bug_changes(startday, endday, alt_startday, alt_endday)
with open("tests/intermittents_commenter/expected_comment.text", "r") as comment:
with open("tests/intermittents_commenter/expected_comment.text") as comment:
expected_comment = comment.read()
print(len(expected_comment))
print(len(comment_params[0]["changes"]["comment"]["body"]))

View file

@ -18,7 +18,7 @@ def do_test(log):
result file with the same prefix.
"""
url = add_log_response("{}.txt.gz".format(log))
url = add_log_response(f"{log}.txt.gz")
builder = LogViewerArtifactBuilder(url)
lpc = ArtifactBuilderCollection(url, builders=builder)
@ -31,7 +31,7 @@ def do_test(log):
# with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
# f.write(json.dumps(act, indent=2))
exp = test_utils.load_exp("{0}.logview.json".format(log))
exp = test_utils.load_exp(f"{log}.logview.json")
assert act == exp

View file

@ -27,6 +27,6 @@ def test_performance_log_parsing_malformed_perfherder_data():
}
],
}
parser.parse_line("PERFHERDER_DATA: {}".format(json.dumps(valid_perfherder_data)), 3)
parser.parse_line(f"PERFHERDER_DATA: {json.dumps(valid_perfherder_data)}", 3)
assert parser.get_artifact() == [valid_perfherder_data]

View file

@ -1,6 +1,5 @@
import random
import datetime
from typing import Tuple
from treeherder.perf.auto_perf_sheriffing.backfill_reports import (
BackfillReportMaintainer,
@ -141,7 +140,7 @@ def test_reports_are_updated_after_alert_summaries_change(
assert initial_records_timestamps != records_timestamps
def __fetch_report_timestamps(test_perf_alert_summary) -> Tuple:
def __fetch_report_timestamps(test_perf_alert_summary) -> tuple:
report = BackfillReport.objects.get(summary=test_perf_alert_summary)
report_timestamps = report.created, report.last_updated
records_timestamps = [record.created for record in report.records.all()]

View file

@ -2,7 +2,7 @@ from datetime import datetime, timedelta
import pytest
from django.conf import settings
from typing import List, Type, Callable
from typing import Callable
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
from treeherder.config.settings import BZ_DATETIME_FORMAT
@ -18,15 +18,15 @@ from treeherder.perf.sheriffing_criteria import (
pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]
def bugzilla_formula_instances() -> List[BugzillaFormula]:
def bugzilla_formula_instances() -> list[BugzillaFormula]:
return [EngineerTractionFormula(), FixRatioFormula()]
def formula_instances() -> List[Callable]:
def formula_instances() -> list[Callable]:
return bugzilla_formula_instances() + [TotalAlertsFormula()]
def concrete_formula_classes() -> List[Type[BugzillaFormula]]:
def concrete_formula_classes() -> list[type[BugzillaFormula]]:
return [EngineerTractionFormula, FixRatioFormula]

View file

@ -151,7 +151,7 @@ def should_take_more_than(seconds: float):
@pytest.fixture
def updatable_criteria_csv(tmp_path):
updatable_csv = tmp_path / "updatable-criteria.csv"
with open(RECORD_TEST_PATH, "r") as file_:
with open(RECORD_TEST_PATH) as file_:
updatable_csv.write_text(file_.read())
return updatable_csv

View file

@ -1,7 +1,6 @@
import pytest
from datetime import datetime, timedelta
from typing import List
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
from treeherder.config.settings import BZ_DATETIME_FORMAT
@ -44,7 +43,7 @@ def quantified_bugs(betamax_recorder) -> list:
@pytest.fixture
def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
def cooled_down_bugs(nonblock_session, quantified_bugs) -> list[dict]:
bugs = []
for bug in quantified_bugs:
created_at = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)

View file

@ -34,7 +34,7 @@ def test_get_usage(push_usage, test_repository):
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
"try", "treeherder-prod"
)
new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
new_relic_url = f"{settings.NEW_RELIC_INSIGHTS_API_URL}?nrql={nrql}"
responses.add(
responses.GET,

View file

@ -5,89 +5,67 @@ import os
class SampleData:
@classmethod
def get_perf_data(cls, filename):
with open(
"{0}/sample_data/artifacts/performance/{1}".format(os.path.dirname(__file__), filename)
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/performance/{filename}") as f:
return json.load(f)
def __init__(self):
self.job_data_file = "{0}/sample_data/job_data.txt".format(os.path.dirname(__file__))
self.job_data_file = f"{os.path.dirname(__file__)}/sample_data/job_data.txt"
self.push_data_file = "{0}/sample_data/push_data.json".format(os.path.dirname(__file__))
self.push_data_file = f"{os.path.dirname(__file__)}/sample_data/push_data.json"
self.logs_dir = "{0}/sample_data/logs".format(os.path.dirname(__file__))
self.logs_dir = f"{os.path.dirname(__file__)}/sample_data/logs"
with open(
"{0}/sample_data/artifacts/text_log_summary.json".format(os.path.dirname(__file__))
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/text_log_summary.json") as f:
self.text_log_summary = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
"{}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
os.path.dirname(__file__)
)
) as f:
self.taskcluster_pulse_messages = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/taskcluster_tasks.json".format(
os.path.dirname(__file__)
)
f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/taskcluster_tasks.json"
) as f:
self.taskcluster_tasks = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
"{}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
os.path.dirname(__file__)
)
) as f:
self.taskcluster_transformed_jobs = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/job_data.json".format(os.path.dirname(__file__))
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/job_data.json") as f:
self.pulse_jobs = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/transformed_job_data.json".format(
os.path.dirname(__file__)
)
f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_job_data.json"
) as f:
self.transformed_pulse_jobs = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/github_push.json".format(os.path.dirname(__file__))
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_push.json") as f:
self.github_push = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/transformed_gh_push.json".format(
os.path.dirname(__file__)
)
f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_push.json"
) as f:
self.transformed_github_push = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/github_pr.json".format(os.path.dirname(__file__))
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_pr.json") as f:
self.github_pr = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/transformed_gh_pr.json".format(
os.path.dirname(__file__)
)
f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_pr.json"
) as f:
self.transformed_github_pr = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/hg_push.json".format(os.path.dirname(__file__))
) as f:
with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/hg_push.json") as f:
self.hg_push = json.load(f)
with open(
"{0}/sample_data/pulse_consumer/transformed_hg_push.json".format(
os.path.dirname(__file__)
)
f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_hg_push.json"
) as f:
self.transformed_hg_push = json.load(f)
@ -106,4 +84,4 @@ class SampleData:
def get_log_path(self, name):
"""Returns the full path to a log file"""
return "{0}/{1}".format(self.logs_dir, name)
return f"{self.logs_dir}/{name}"

View file

@ -54,15 +54,15 @@ class TestTaskclusterModelImpl:
def test_task_in_context(self):
# match
tag_set_list, task_tags = [
tag_set_list, task_tags = (
load_json_fixture(f) for f in ("matchingTagSetList.json", "matchingTaskTags.json")
]
)
assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is True
# mismatch
tag_set_list, task_tags = [
tag_set_list, task_tags = (
load_json_fixture(f) for f in ("mismatchingTagSetList.json", "mismatchingTaskTags.json")
]
)
assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is False
def test_get_action(self, actions_json, expected_backfill_task):

View file

@ -9,7 +9,7 @@ def test_get_version(client):
response = client.get("/__version__")
assert response.status_code == 200
with open(f"{settings.BASE_DIR}/version.json", "r") as version_file:
with open(f"{settings.BASE_DIR}/version.json") as version_file:
assert response.json() == json.loads(version_file.read())

View file

@ -213,7 +213,7 @@ def add_log_response(filename):
Set up responses for a local gzipped log and return the url for it.
"""
log_path = SampleData().get_log_path(filename)
log_url = "http://my-log.mozilla.org/{}".format(filename)
log_url = f"http://my-log.mozilla.org/{filename}"
with open(log_path, "rb") as log_file:
content = log_file.read()

View file

@ -95,7 +95,7 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_us
user=test_user,
)
pk = "{0}-{1}".format(job.id, bug.id)
pk = f"{job.id}-{bug.id}"
resp = client.get(
reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
@ -130,7 +130,7 @@ def test_bug_job_map_delete(
if not test_no_auth:
client.force_authenticate(user=test_user)
pk = "{0}-{1}".format(job.id, bug.id)
pk = f"{job.id}-{bug.id}"
resp = client.delete(
reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})

View file

@ -1,5 +1,3 @@
# coding: utf-8
import json
import responses

View file

@ -18,11 +18,9 @@ def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, ex
endpoint.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
params = "&".join(
["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
)
params = "&".join([f"{k}={v}" for k, v in [("offset", offset), ("count", count)] if v])
if params:
url += "?{}".format(params)
url += f"?{params}"
resp = client.get(url)
assert resp.status_code == 200
response_dict = resp.json()
@ -143,7 +141,7 @@ def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fie
to make this test easy.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
final_url = url + "?{}={}".format(fieldname, expected)
final_url = url + f"?{fieldname}={expected}"
resp = client.get(final_url)
assert resp.status_code == 200
first = resp.json()["results"][0]
@ -245,11 +243,9 @@ def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_n
job = Job.objects.get(id=1)
url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
params = "&".join(
["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
)
params = "&".join([f"{k}={v}" for k, v in [("offset", offset), ("count", count)] if v])
if params:
url += "?{}".format(params)
url += f"?{params}"
resp = client.get(url)
assert resp.status_code == 200
@ -288,7 +284,7 @@ def test_last_modified(
pass
url = reverse("jobs-list", kwargs={"project": test_repository.name})
final_url = url + ("?{}={}".format(lm_key, lm_value))
final_url = url + (f"?{lm_key}={lm_value}")
resp = client.get(final_url)
assert resp.status_code == exp_status

View file

@ -673,4 +673,4 @@ def dump_vars(alert_summaries, perf_data, alerts=None):
for alert in alerts:
dump(alert)
for perf_datum in perf_data:
pprint("PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})".format(perf_datum))
pprint(f"PerfData(id={perf_datum.push_id}, push_timestamp={perf_datum.push_timestamp})")

View file

@ -31,7 +31,7 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
for i in range(2):
Push.objects.create(
repository=test_repository_onhold,
revision="1234abcd{}".format(i),
revision=f"1234abcd{i}",
author="foo@bar.com",
time=datetime.now(),
)

View file

@ -9,12 +9,12 @@ def test_perf_bug_template_api(client, test_perf_framework):
template_dicts = []
for framework, i in zip((test_perf_framework, framework2), range(2)):
dict = {
"keywords": "keyword{}".format(i),
"status_whiteboard": "sw{}".format(i),
"default_component": "dfcom{}".format(i),
"default_product": "dfprod{}".format(i),
"cc_list": "foo{}@bar.com".format(i),
"text": "my great text {}".format(i),
"keywords": f"keyword{i}",
"status_whiteboard": f"sw{i}",
"default_component": f"dfcom{i}",
"default_product": f"dfprod{i}",
"cc_list": f"foo{i}@bar.com",
"text": f"my great text {i}",
}
PerformanceBugTemplate.objects.create(framework=framework, **dict)
dict["framework"] = framework.id
@ -27,7 +27,7 @@ def test_perf_bug_template_api(client, test_perf_framework):
# test that we can get just one (the usual case, probably)
resp = client.get(
reverse("performance-bug-template-list") + "?framework={}".format(test_perf_framework.id)
reverse("performance-bug-template-list") + f"?framework={test_perf_framework.id}"
)
assert resp.status_code == 200
assert resp.json() == [template_dicts[0]]

View file

@ -102,7 +102,7 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
"performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
+ "?interval={}".format(86400)
+ "?interval=86400"
)
assert resp.status_code == 200
assert resp.json() == []
@ -140,7 +140,7 @@ def test_performance_platforms_framework_filtering(client, test_perf_signature):
"performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
+ "?framework={}".format(framework2.id)
+ f"?framework={framework2.id}"
)
assert resp.status_code == 200
assert resp.json() == ["win7-a"]
@ -259,7 +259,7 @@ def test_filter_data_by_no_retriggers(
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
+ "?signatures={}&no_retriggers=true".format(test_perf_signature.signature_hash)
+ f"?signatures={test_perf_signature.signature_hash}&no_retriggers=true"
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
@ -316,9 +316,7 @@ def test_filter_data_by_framework(
# Filtering by second framework
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
+ "?signatures={}&framework={}".format(
test_perf_signature.signature_hash, signature2.framework.id
)
+ f"?signatures={test_perf_signature.signature_hash}&framework={signature2.framework.id}"
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
@ -332,7 +330,7 @@ def test_filter_signatures_by_interval(client, test_perf_signature):
reverse(
"performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
+ "?interval={}".format(86400)
+ "?interval=86400"
)
assert resp.status_code == 200
assert len(resp.json().keys()) == 1
@ -354,7 +352,7 @@ def test_filter_signatures_by_range(
reverse(
"performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
+ "?start_date={}&end_date={}".format(start_date, end_date)
+ f"?start_date={start_date}&end_date={end_date}"
)
assert resp.status_code == 200
assert len(resp.json().keys()) == exp_count
@ -387,7 +385,7 @@ def test_filter_data_by_interval(
# going back interval of 1 day, should find 1 item
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
+ "?signature_id={}&interval={}".format(test_perf_signature.id, interval)
+ f"?signature_id={test_perf_signature.id}&interval={interval}"
)
assert resp.status_code == 200
@ -424,9 +422,7 @@ def test_filter_data_by_range(
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
+ "?signature_id={}&start_date={}&end_date={}".format(
test_perf_signature.id, start_date, end_date
)
+ f"?signature_id={test_perf_signature.id}&start_date={start_date}&end_date={end_date}"
)
assert resp.status_code == 200
@ -472,7 +468,7 @@ def test_filter_data_by_signature(
]:
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
+ "?{}={}".format(param, value)
+ f"?{param}={value}"
)
assert resp.status_code == 200
assert len(resp.data.keys()) == 1
@ -719,7 +715,7 @@ def test_alert_summary_tasks_get(client, test_perf_alert_summary, test_perf_data
status=PerformanceAlert.REASSIGNED,
)
resp = client.get(
reverse("performance-alertsummary-tasks") + "?id={}".format(test_perf_alert_summary.id)
reverse("performance-alertsummary-tasks") + f"?id={test_perf_alert_summary.id}"
)
assert resp.status_code == 200
assert resp.json() == {
@ -737,9 +733,7 @@ def test_alert_summary_tasks_get_failure(client, test_perf_alert_summary):
# verify that we fail if PerformanceAlertSummary does not exist
not_exist_summary_id = test_perf_alert_summary.id
test_perf_alert_summary.delete()
resp = client.get(
reverse("performance-alertsummary-tasks") + "?id={}".format(not_exist_summary_id)
)
resp = client.get(reverse("performance-alertsummary-tasks") + f"?id={not_exist_summary_id}")
assert resp.status_code == 400
assert resp.json() == {"message": ["PerformanceAlertSummary does not exist."]}

View file

@ -26,7 +26,7 @@ def test_unsupported_version():
def test_correct_version():
view = RequestVersionView.as_view()
version = settings.REST_FRAMEWORK["ALLOWED_VERSIONS"][0]
request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version={0}".format(version))
request = factory.get("/endpoint/", HTTP_ACCEPT=f"application/json; version={version}")
response = view(request)
assert response.data == {"version": version}

View file

@ -19,7 +19,7 @@ class Changelog(models.Model):
unique_together = ("id", "remote_id", "type")
def __str__(self):
return "[%s] %s by %s" % (self.id, self.message, self.author)
return f"[{self.id}] {self.message} by {self.author}"
class ChangelogFile(models.Model):

View file

@ -1,4 +1,3 @@
import io
import os
import re
@ -7,7 +6,7 @@ from setuptools import setup
def read(*names, **kwargs):
# Taken from https://packaging.python.org/en/latest/single_source_version.html
with io.open(
with open(
os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()

View file

@ -17,8 +17,8 @@ class TreeherderClient:
API_VERSION = "1.1"
REQUEST_HEADERS = {
"Accept": "application/json; version={}".format(API_VERSION),
"User-Agent": "treeherder-pyclient/{}".format(__version__),
"Accept": f"application/json; version={API_VERSION}",
"User-Agent": f"treeherder-pyclient/{__version__}",
}
PUSH_ENDPOINT = "push"
@ -43,9 +43,9 @@ class TreeherderClient:
def _get_endpoint_url(self, endpoint, project=None):
if project:
return "{}/api/project/{}/{}/".format(self.server_url, project, endpoint)
return f"{self.server_url}/api/project/{project}/{endpoint}/"
return "{}/api/{}/".format(self.server_url, endpoint)
return f"{self.server_url}/api/{endpoint}/"
def _get_json_list(self, endpoint, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):

View file

@ -38,7 +38,7 @@ for key, value in EXCHANGE_EVENT_MAP.items():
conn_sem = BoundedSemaphore(50)
class Connection(object):
class Connection:
def __enter__(self):
conn_sem.acquire()
@ -51,15 +51,15 @@ def ingest_pr(pr_url, root_url):
_, _, _, org, repo, _, pull_number, _ = pr_url.split("/", 7)
pulse = {
"exchange": "exchange/taskcluster-github/v1/pull-request",
"routingKey": "primary.{}.{}.synchronize".format(org, repo),
"routingKey": f"primary.{org}.{repo}.synchronize",
"payload": {
"repository": repo,
"organization": org,
"action": "synchronize",
"details": {
"event.pullNumber": pull_number,
"event.base.repo.url": "https://github.com/{}/{}.git".format(org, repo),
"event.head.repo.url": "https://github.com/{}/{}.git".format(org, repo),
"event.base.repo.url": f"https://github.com/{org}/{repo}.git",
"event.head.repo.url": f"https://github.com/{org}/{repo}.git",
},
},
}
@ -233,10 +233,10 @@ def process_job_with_threads(pulse_job, root_url):
def find_task_id(index_path, root_url):
index_url = liburls.api(root_url, "index", "v1", "task/{}".format(index_path))
index_url = liburls.api(root_url, "index", "v1", f"task/{index_path}")
response = requests.get(index_url)
if response.status_code == 404:
raise Exception("Index URL {} not found".format(index_url))
raise Exception(f"Index URL {index_url} not found")
return response.json()["taskId"]
@ -248,7 +248,7 @@ def get_decision_task_id(project, revision, root_url):
def repo_meta(project):
_repo = Repository.objects.filter(name=project)[0]
assert _repo, "The project {} you specified is incorrect".format(project)
assert _repo, f"The project {project} you specified is incorrect"
splitUrl = _repo.url.split("/")
return {
"url": _repo.url,
@ -388,9 +388,7 @@ def ingest_git_pushes(project, dry_run=False):
oldest_parent_revision = info["parents"][0]["sha"]
push_to_date[oldest_parent_revision] = info["commit"]["committer"]["date"]
logger.info(
"Push: {} - Date: {}".format(
oldest_parent_revision, push_to_date[oldest_parent_revision]
)
f"Push: {oldest_parent_revision} - Date: {push_to_date[oldest_parent_revision]}"
)
push_revision.append(_commit["sha"])

View file

@ -33,13 +33,13 @@ class Command(BaseCommand):
userid = urlparse(connection_url).username
payload_file = options["payload_file"]
exchange_name = "exchange/{}/jobs".format(userid)
exchange_name = f"exchange/{userid}/jobs"
connection = Connection(connection_url)
exchange = Exchange(exchange_name, type="topic")
producer = Producer(connection, exchange, routing_key=routing_key, auto_declare=True)
self.stdout.write("Published to exchange: {}".format(exchange_name))
self.stdout.write(f"Published to exchange: {exchange_name}")
with open(payload_file) as f:
body = f.read()

View file

@ -41,7 +41,7 @@ class Command(BaseCommand):
],
)
listener_params = (JointConsumer, pulse_sources, [lambda key: "#.{}".format(key), None])
listener_params = (JointConsumer, pulse_sources, [lambda key: f"#.{key}", None])
consumer = prepare_joint_consumers(listener_params)
try:

View file

@ -36,7 +36,7 @@ class Command(BaseCommand):
consumers = prepare_consumers(
TaskConsumer,
task_sources,
lambda key: "#.{}".format(key),
lambda key: f"#.{key}",
)
try:

View file

@ -38,7 +38,7 @@ class Command(BaseCommand):
consumers = prepare_consumers(
MozciClassificationConsumer,
classification_sources,
lambda key: "#.{}".format(key),
lambda key: f"#.{key}",
)
try:

View file

@ -2,7 +2,7 @@ import copy
import logging
from datetime import datetime
from hashlib import sha1
from typing import List, Optional, Tuple
from typing import Optional
import simplejson as json
@ -51,7 +51,7 @@ def _get_signature_hash(signature_properties):
return sha.hexdigest()
def _order_and_concat(words: List) -> str:
def _order_and_concat(words: list) -> str:
return " ".join(sorted(words))
@ -76,7 +76,7 @@ def _create_or_update_signature(repository, signature_hash, framework, applicati
return signature
def _deduce_push_timestamp(perf_datum: dict, job_push_time: datetime) -> Tuple[datetime, bool]:
def _deduce_push_timestamp(perf_datum: dict, job_push_time: datetime) -> tuple[datetime, bool]:
is_multi_commit = False
if not settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION:
# the old way of ingestion
@ -119,7 +119,7 @@ def _test_should_alert_based_on(
def _test_should_gather_replicates_based_on(
repository: Repository, suite_name: str, replicates: Optional[List] = None
repository: Repository, suite_name: str, replicates: Optional[list] = None
) -> bool:
"""
Determine if we should gather/ingest replicates. Currently, it's

View file

@ -58,7 +58,7 @@ class PushLoader:
return GithubPullRequestTransformer
elif "/hgpushes/" in exchange:
return HgPushTransformer
raise PulsePushError("Unsupported push exchange: {}".format(exchange))
raise PulsePushError(f"Unsupported push exchange: {exchange}")
class GithubTransformer:
@ -156,7 +156,7 @@ class GithubPushTransformer(GithubTransformer):
if self.message_body["details"].get("event.head.tag"):
return "tag"
return super(GithubPushTransformer, self).get_branch()
return super().get_branch()
def transform(self, repository):
push_data = compare_shas(

View file

@ -53,14 +53,14 @@ class HgPushlogProcess:
}
def run(self, source_url, repository_name, changeset=None, last_push_id=None):
cache_key = "{}:last_push_id".format(repository_name)
cache_key = f"{repository_name}:last_push_id"
if not last_push_id:
# get the last object seen from cache. this will
# reduce the number of pushes processed every time
last_push_id = cache.get(cache_key)
if not changeset and last_push_id:
startid_url = "{}&startID={}".format(source_url, last_push_id)
startid_url = f"{source_url}&startID={last_push_id}"
logger.debug(
"Extracted last push for '%s', '%s', from cache, "
"attempting to get changes only from that point at: %s",

View file

@ -77,7 +77,7 @@ def parseRouteInfo(prefix, taskId, routes, task):
raise PulseHandlerError(
"Could not determine Treeherder route. Either there is no route, "
+ "or more than one matching route exists."
+ "Task ID: {taskId} Routes: {routes}".format(taskId=taskId, routes=routes)
+ f"Task ID: {taskId} Routes: {routes}"
)
parsedRoute = parseRoute(matchingRoutes[0])
@ -156,7 +156,7 @@ def ignore_task(task, taskId, rootUrl, project):
break
if ignore:
logger.debug("Task to be ignored ({})".format(taskId))
logger.debug(f"Task to be ignored ({taskId})")
return ignore
@ -225,7 +225,7 @@ def buildMessage(pushInfo, task, runId, payload):
job = {
"buildSystem": "taskcluster",
"owner": task["metadata"]["owner"],
"taskId": "{taskId}/{runId}".format(taskId=slugid.decode(taskId), runId=runId),
"taskId": f"{slugid.decode(taskId)}/{runId}",
"retryId": runId,
"isRetried": False,
"display": {
@ -397,7 +397,7 @@ async def addArtifactUploadedLinks(root_url, taskId, runId, job, session):
seen[name] = [artifact["name"]]
else:
seen[name].append(artifact["name"])
name = "{name} ({length})".format(name=name, length=len(seen[name]) - 1)
name = f"{name} ({len(seen[name]) - 1})"
links.append(
{

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
import re
# Regexp that matches all non-BMP unicode characters.
@ -19,7 +18,7 @@ def convert_unicode_character_to_ascii_repr(match_obj):
hex_value = hex_code_point.zfill(6).upper()
return "<U+{}>".format(hex_value)
return f"<U+{hex_value}>"
def astral_filter(text):

View file

@ -175,7 +175,7 @@ class Commenter:
)
def open_file(self, filename, load):
with open("treeherder/intermittents_commenter/{}".format(filename), "r") as myfile:
with open(f"treeherder/intermittents_commenter/{filename}") as myfile:
if load:
return json.load(myfile)
else:
@ -212,7 +212,7 @@ class Commenter:
# Use a custom HTTP adapter, so we can set a non-zero max_retries value.
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
session.headers = {
"User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
"User-Agent": f"treeherder/{settings.SITE_HOSTNAME}",
"x-bugzilla-api-key": settings.COMMENTER_API_KEY,
"Accept": "application/json",
}
@ -233,7 +233,7 @@ class Commenter:
)
response.raise_for_status()
except RequestException as e:
logger.warning("error fetching bugzilla metadata for bugs due to {}".format(e))
logger.warning(f"error fetching bugzilla metadata for bugs due to {e}")
return None
if response.headers["Content-Type"] == "text/html; charset=UTF-8":
@ -246,12 +246,12 @@ class Commenter:
return data["bugs"]
def submit_bug_changes(self, changes, bug_id):
url = "{}/rest/bug/{}".format(settings.BZ_API_URL, str(bug_id))
url = f"{settings.BZ_API_URL}/rest/bug/{str(bug_id)}"
try:
response = self.session.put(url, headers=self.session.headers, json=changes, timeout=30)
response.raise_for_status()
except RequestException as e:
logger.error("error posting comment to bugzilla for bug {} due to {}".format(bug_id, e))
logger.error(f"error posting comment to bugzilla for bug {bug_id} due to {e}")
def get_test_runs(self, startday, endday):
"""Returns an aggregate of pushes for specified date range and

View file

@ -53,7 +53,7 @@ def write_failure_lines(job_log, log_iter):
try:
failure_lines = create(job_log, log_list)
except DataError as e:
logger.warning("Got DataError inserting failure_line: {}".format(e.args))
logger.warning(f"Got DataError inserting failure_line: {e.args}")
except OperationalError as e:
logger.warning("Got OperationalError inserting failure_line")
# Retry iff this error is the "incorrect String Value" error

View file

@ -40,7 +40,7 @@ class Command(BaseCommand):
if not options["profile"]:
for name, artifact in artifact_bc.artifacts.items():
print("%s, %s" % (name, json.dumps(artifact, indent=2)))
print(f"{name}, {json.dumps(artifact, indent=2)}")
if options["profile"]:
print("Timings: %s" % times)

View file

@ -64,27 +64,23 @@ class ErrorParser(ParserBase):
)
RE_ERR_MATCH = re.compile(
(
r"^g?make(?:\[\d+\])?: \*\*\*"
r"|^[A-Za-z.]+Error: "
r"|^[A-Za-z.]*Exception: "
r"|^\[ FAILED \] "
r"|^remoteFailed:"
r"|^rm: cannot "
r"|^abort:"
r"|^\[taskcluster\] Error:"
r"|^\[[\w._-]+:(?:error|exception)\]"
)
r"^g?make(?:\[\d+\])?: \*\*\*"
r"|^[A-Za-z.]+Error: "
r"|^[A-Za-z.]*Exception: "
r"|^\[ FAILED \] "
r"|^remoteFailed:"
r"|^rm: cannot "
r"|^abort:"
r"|^\[taskcluster\] Error:"
r"|^\[[\w._-]+:(?:error|exception)\]"
)
RE_ERR_SEARCH = re.compile(
(
r" error\(\d*\):"
r"|:\d+: error:"
r"| error R?C\d*:"
r"|ERROR [45]\d\d:"
r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
)
r" error\(\d*\):"
r"|:\d+: error:"
r"| error R?C\d*:"
r"|ERROR [45]\d\d:"
r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
)
RE_EXCLUDE_1_SEARCH = re.compile(r"TEST-(?:INFO|PASS) ")

View file

@ -21,7 +21,7 @@ def validate_perf_data(performance_data: dict):
for suite in performance_data["suites"]:
# allow only one extraOption longer than 45
if len(_long_options(_extra_options(suite), *expected_range)) > 1:
raise ValidationError("Too many extra options longer than {}".format(SECOND_MAX_LENGTH))
raise ValidationError(f"Too many extra options longer than {SECOND_MAX_LENGTH}")
def _long_options(all_extra_options: list, second_max: int, first_max: int):

View file

@ -1,7 +1,6 @@
import logging
from abc import ABC, abstractmethod
from datetime import timedelta, datetime
from typing import List
from django.db import OperationalError, connection
from django.db.backends.utils import CursorWrapper
@ -69,9 +68,9 @@ class TreeherderCycler(DataCycler):
rs_deleted = Job.objects.cycle_data(
self.cycle_interval, self.chunk_size, self.sleep_time
)
logger.warning("Deleted {} jobs".format(rs_deleted))
logger.warning(f"Deleted {rs_deleted} jobs")
except OperationalError as e:
logger.error("Error running cycle_data: {}".format(e))
logger.error(f"Error running cycle_data: {e}")
self._remove_leftovers()
@ -79,17 +78,17 @@ class TreeherderCycler(DataCycler):
logger.warning("Pruning ancillary data: job types, groups and machines")
def prune(reference_model, id_name, model):
logger.warning("Pruning {}s".format(model.__name__))
logger.warning(f"Pruning {model.__name__}s")
used_ids = (
reference_model.objects.only(id_name).values_list(id_name, flat=True).distinct()
)
unused_ids = model.objects.exclude(id__in=used_ids).values_list("id", flat=True)
logger.warning("Removing {} records from {}".format(len(unused_ids), model.__name__))
logger.warning(f"Removing {len(unused_ids)} records from {model.__name__}")
while len(unused_ids):
delete_ids = unused_ids[: self.chunk_size]
logger.warning("deleting {} of {}".format(len(delete_ids), len(unused_ids)))
logger.warning(f"deleting {len(delete_ids)} of {len(unused_ids)}")
model.objects.filter(id__in=delete_ids).delete()
unused_ids = unused_ids[self.chunk_size :]
@ -111,7 +110,7 @@ class PerfherderCycler(DataCycler):
sleep_time: int,
is_debug: bool = None,
days: int = None,
strategies: List[RemovalStrategy] = None,
strategies: list[RemovalStrategy] = None,
**kwargs,
):
super().__init__(chunk_size, sleep_time, is_debug)
@ -223,9 +222,7 @@ class PerfherderCycler(DataCycler):
break # either finished removing all expired data or failed
else:
any_successful_attempt = True
logger.debug(
"Successfully deleted {} performance datum rows".format(deleted_rows)
)
logger.debug(f"Successfully deleted {deleted_rows} performance datum rows")
def __handle_chunk_removal_exception(
self, exception, cursor: CursorWrapper, any_successful_attempt: bool

View file

@ -4,7 +4,6 @@ import logging
from abc import ABC, abstractmethod
from datetime import timedelta, datetime
from itertools import cycle
from typing import List
from django.conf import settings
from django.db.backends.utils import CursorWrapper
@ -48,7 +47,7 @@ class RemovalStrategy(ABC):
pass
@staticmethod
def fabricate_all_strategies(*args, **kwargs) -> List[RemovalStrategy]:
def fabricate_all_strategies(*args, **kwargs) -> list[RemovalStrategy]:
return [
MainRemovalStrategy(*args, **kwargs),
TryDataRemoval(*args, **kwargs),
@ -364,7 +363,7 @@ class StalledDataRemoval(RemovalStrategy):
return self._target_signature
@property
def removable_signatures(self) -> List[PerformanceSignature]:
def removable_signatures(self) -> list[PerformanceSignature]:
if self._removable_signatures is None:
self._removable_signatures = list(
PerformanceSignature.objects.filter(last_updated__lte=self._max_timestamp).order_by(

View file

@ -1,5 +1,4 @@
import logging
from typing import List
import taskcluster
from django.conf import settings
@ -85,7 +84,7 @@ class PublicSignatureRemover:
def _send_email(self):
self._notify.email(self._email_writer.email)
def __delete_and_notify(self, signatures: List[PerformanceSignature]) -> bool:
def __delete_and_notify(self, signatures: list[PerformanceSignature]) -> bool:
"""
Atomically deletes perf signatures & notifies about this.
@return: whether atomic operation was successful or not
@ -104,5 +103,5 @@ class PublicSignatureRemover:
return True
def _prepare_notification(self, signatures: List[PerformanceSignature]):
def _prepare_notification(self, signatures: list[PerformanceSignature]):
self._email_writer.prepare_new_email(signatures)

View file

@ -32,7 +32,7 @@ def get_error_summary(job, queryset=None):
Caches the results if there are any.
"""
cache_key = "error-summary-{}".format(job.id)
cache_key = f"error-summary-{job.id}"
cached_error_summary = cache.get(cache_key)
if cached_error_summary is not None:
return cached_error_summary

View file

@ -37,7 +37,7 @@ class Command(BaseCommand):
self.is_debug = options["debug"]
days = options["days"]
self.debug("Fetching {} sets of history...".format(days))
self.debug(f"Fetching {days} sets of history...")
option_map = OptionCollection.objects.get_option_collection_map()
repository_ids = REPO_GROUPS["trunk"]

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-08 11:41
import django.core.validators
import django.db.models.deletion

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-30 16:50
from django.db import migrations
import django.db.models.manager

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:11
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:11
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:23
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:30
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-05 09:29
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-05 09:40
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-18 18:21
from django.db import migrations
import django.db.models.manager

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-26 21:21
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-06 09:25
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-13 20:29
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-01-02 23:34
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-28 17:39
from django.db import migrations, models

View file

@ -4,7 +4,6 @@ import logging
import re
import time
from hashlib import sha1
from typing import List
import warnings
@ -80,7 +79,7 @@ class BuildPlatform(models.Model):
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
return f"{self.os_name} {self.platform} {self.architecture}"
class Option(NamedModel):
@ -117,11 +116,11 @@ class Repository(models.Model):
verbose_name_plural = "repositories"
@classmethod
def fetch_all_names(cls) -> List[str]:
def fetch_all_names(cls) -> list[str]:
return cls.objects.values_list("name", flat=True)
def __str__(self):
return "{0} {1}".format(self.name, self.repository_group)
return f"{self.name} {self.repository_group}"
class Push(models.Model):
@ -145,7 +144,7 @@ class Push(models.Model):
unique_together = ("repository", "revision")
def __str__(self):
return "{0} {1}".format(self.repository.name, self.revision)
return f"{self.repository.name} {self.revision}"
def total_jobs(self, job_type, result):
return self.jobs.filter(job_type=job_type, result=result).count()
@ -194,7 +193,7 @@ class Commit(models.Model):
unique_together = ("push", "revision")
def __str__(self):
return "{0} {1}".format(self.push.repository.name, self.revision)
return f"{self.push.repository.name} {self.revision}"
class MachinePlatform(models.Model):
@ -208,7 +207,7 @@ class MachinePlatform(models.Model):
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
return f"{self.os_name} {self.platform} {self.architecture}"
class Bugscache(models.Model):
@ -232,7 +231,7 @@ class Bugscache(models.Model):
]
def __str__(self):
return "{0}".format(self.id)
return f"{self.id}"
@classmethod
def sanitized_search_term(self, search_term):
@ -322,7 +321,7 @@ class BugzillaComponent(models.Model):
unique_together = ("product", "component")
def __str__(self):
return "{0} :: {1}".format(self.product, self.component)
return f"{self.product} :: {self.component}"
class FilesBugzillaMap(models.Model):
@ -335,7 +334,7 @@ class FilesBugzillaMap(models.Model):
verbose_name_plural = "files_bugzilla_components"
def __str__(self):
return "{0}".format(self.path)
return f"{self.path}"
class BugzillaSecurityGroup(models.Model):
@ -363,7 +362,7 @@ class JobGroup(models.Model):
unique_together = ("name", "symbol")
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
return f"{self.name} ({self.symbol})"
class OptionCollectionManager(models.Manager):
@ -413,7 +412,7 @@ class OptionCollection(models.Model):
unique_together = ("option_collection_hash", "option")
def __str__(self):
return "{0}".format(self.option)
return f"{self.option}"
class JobType(models.Model):
@ -427,7 +426,7 @@ class JobType(models.Model):
unique_together = (("name", "symbol"),)
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
return f"{self.name} ({self.symbol})"
class FailureClassification(NamedModel):
@ -602,7 +601,7 @@ class Job(models.Model):
return self.tier < 3
def __str__(self):
return "{0} {1} {2}".format(self.id, self.repository, self.guid)
return f"{self.id} {self.repository} {self.guid}"
def get_platform_option(self, option_collection_map=None):
if not hasattr(self, "platform_option"):
@ -723,7 +722,7 @@ class JobLog(models.Model):
unique_together = ("job", "name", "url")
def __str__(self):
return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.name, self.status)
return f"{self.id} {self.job.guid} {self.name} {self.status}"
def update_status(self, status):
self.status = status
@ -793,7 +792,7 @@ class BugJobMap(models.Model):
return bug_map
def __str__(self):
return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.bug_id, self.user)
return f"{self.id} {self.job.guid} {self.bug_id} {self.user}"
class JobNote(models.Model):
@ -899,9 +898,7 @@ class JobNote(models.Model):
self._ensure_classification()
def __str__(self):
return "{0} {1} {2} {3}".format(
self.id, self.job.guid, self.failure_classification, self.who
)
return f"{self.id} {self.job.guid} {self.failure_classification} {self.who}"
class FailureLine(models.Model):
@ -959,7 +956,7 @@ class FailureLine(models.Model):
unique_together = ("job_log", "line")
def __str__(self):
return "{0} {1}".format(self.id, Job.objects.get(guid=self.job_guid).id)
return f"{self.id} {Job.objects.get(guid=self.job_guid).id}"
@property
def error(self):
@ -1116,7 +1113,7 @@ class ClassifiedFailure(models.Model):
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return "{0} {1}".format(self.id, self.bug_number)
return f"{self.id} {self.bug_number}"
def bug(self):
# Putting this here forces one query per object; there should be a way
@ -1256,7 +1253,7 @@ class TextLogError(models.Model):
unique_together = (("step", "line_number"), ("job", "line_number"))
def __str__(self):
return "{0} {1}".format(self.id, self.job.id)
return f"{self.id} {self.job.id}"
@property
def metadata(self):
@ -1387,7 +1384,7 @@ class TextLogErrorMatch(models.Model):
unique_together = ("text_log_error", "classified_failure", "matcher_name")
def __str__(self):
return "{0} {1}".format(self.text_log_error.id, self.classified_failure.id)
return f"{self.text_log_error.id} {self.classified_failure.id}"
class InvestigatedTests(models.Model):

View file

@ -1,7 +1,7 @@
import logging
from datetime import timedelta, datetime
from itertools import zip_longest, groupby
from typing import Tuple, List, Optional
from typing import Optional
import simplejson as json
from django.db.models import QuerySet, Q, F
@ -24,7 +24,7 @@ class AlertsPicker:
"""
def __init__(
self, max_alerts: int, max_improvements: int, platforms_of_interest: Tuple[str, ...]
self, max_alerts: int, max_improvements: int, platforms_of_interest: tuple[str, ...]
):
"""
:param max_alerts: the maximum number of selected alerts
@ -49,7 +49,7 @@ class AlertsPicker:
self.max_improvements = max_improvements
self.ordered_platforms_of_interest = platforms_of_interest
def extract_important_alerts(self, alerts: Tuple[PerformanceAlert, ...]):
def extract_important_alerts(self, alerts: tuple[PerformanceAlert, ...]):
if any(not isinstance(alert, PerformanceAlert) for alert in alerts):
raise ValueError("Provided parameter does not contain only PerformanceAlert objects.")
relevant_alerts = self._extract_by_relevant_platforms(alerts)
@ -57,7 +57,7 @@ class AlertsPicker:
sorted_alerts = self._multi_criterion_sort(alerts_with_distinct_jobs)
return self._ensure_alerts_variety(sorted_alerts)
def _ensure_alerts_variety(self, sorted_alerts: List[PerformanceAlert]):
def _ensure_alerts_variety(self, sorted_alerts: list[PerformanceAlert]):
"""
The alerts container must be sorted before being passed to this function.
The returned list must contain regressions and (if present) improvements.
@ -81,12 +81,12 @@ class AlertsPicker:
: self.max_improvements if improvements_only else self.max_alerts
]
def _ensure_distinct_jobs(self, alerts: List[PerformanceAlert]) -> List[PerformanceAlert]:
def _ensure_distinct_jobs(self, alerts: list[PerformanceAlert]) -> list[PerformanceAlert]:
def initial_culprit_job(alert):
return alert.initial_culprit_job
def parent_or_sibling_from(
alert_group: List[PerformanceAlert],
alert_group: list[PerformanceAlert],
) -> Optional[PerformanceAlert]:
if len(alert_group) == 0:
return None
@ -105,8 +105,8 @@ class AlertsPicker:
return list(filter(None, alerts))
def _ensure_platform_variety(
self, sorted_all_alerts: List[PerformanceAlert]
) -> List[PerformanceAlert]:
self, sorted_all_alerts: list[PerformanceAlert]
) -> list[PerformanceAlert]:
"""
Note: Ensure that the sorted_all_alerts container has only
platforms of interest (example: 'windows10', 'windows7', 'linux', 'osx', 'android').
@ -191,7 +191,7 @@ class IdentifyAlertRetriggerables:
self._time_interval = time_interval
self.log = logger or logging.getLogger(self.__class__.__name__)
def __call__(self, alert: PerformanceAlert) -> List[dict]:
def __call__(self, alert: PerformanceAlert) -> list[dict]:
"""
Main method
"""
@ -238,7 +238,7 @@ class IdentifyAlertRetriggerables:
)
return annotated_data_points
def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> List[dict]:
def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> list[dict]:
seen_push_ids = set()
seen_add = seen_push_ids.add
return [
@ -247,7 +247,7 @@ class IdentifyAlertRetriggerables:
if not (data_point["push_id"] in seen_push_ids or seen_add(data_point["push_id"]))
]
def _find_push_id_index(self, push_id: int, flattened_data_points: List[dict]) -> int:
def _find_push_id_index(self, push_id: int, flattened_data_points: list[dict]) -> int:
for index, data_point in enumerate(flattened_data_points):
if data_point["push_id"] == push_id:
return index
@ -261,7 +261,7 @@ class IdentifyAlertRetriggerables:
return slice(left_margin, right_margin)
def _glance_over_retrigger_range(self, data_points_to_retrigger: List[dict]):
def _glance_over_retrigger_range(self, data_points_to_retrigger: list[dict]):
retrigger_range = len(data_points_to_retrigger)
if retrigger_range < self._range_width:
self.log.warning(
@ -286,12 +286,12 @@ class BackfillReportMaintainer:
self.log = logger or logging.getLogger(self.__class__.__name__)
def provide_updated_reports(
self, since: datetime, frameworks: List[str], repositories: List[str]
) -> List[BackfillReport]:
self, since: datetime, frameworks: list[str], repositories: list[str]
) -> list[BackfillReport]:
alert_summaries = self.__fetch_summaries_to_retrigger(since, frameworks, repositories)
return self.compile_reports_for(alert_summaries)
def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> List[BackfillReport]:
def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> list[BackfillReport]:
reports = []
for summary in summaries_to_retrigger:
@ -317,12 +317,12 @@ class BackfillReportMaintainer:
def _pick_important_alerts(
self, from_summary: PerformanceAlertSummary
) -> List[PerformanceAlert]:
) -> list[PerformanceAlert]:
return self.alerts_picker.extract_important_alerts(
from_summary.alerts.filter(status=PerformanceAlert.UNTRIAGED)
)
def _provide_records(self, backfill_report: BackfillReport, alert_context_map: List[Tuple]):
def _provide_records(self, backfill_report: BackfillReport, alert_context_map: list[tuple]):
for alert, retrigger_context in alert_context_map:
BackfillRecord.objects.create(
alert=alert,
@ -331,7 +331,7 @@ class BackfillReportMaintainer:
)
def __fetch_summaries_to_retrigger(
self, since: datetime, frameworks: List[str], repositories: List[str]
self, since: datetime, frameworks: list[str], repositories: list[str]
) -> QuerySet:
no_reports_yet = Q(last_updated__gte=since, backfill_report__isnull=True)
with_outdated_reports = Q(last_updated__gt=F("backfill_report__last_updated"))
@ -348,7 +348,7 @@ class BackfillReportMaintainer:
.filter(filters)
)
def _associate_retrigger_context(self, important_alerts: List[PerformanceAlert]) -> List[Tuple]:
def _associate_retrigger_context(self, important_alerts: list[PerformanceAlert]) -> list[tuple]:
retrigger_map = []
incomplete_mapping = False

View file

@ -1,6 +1,5 @@
import logging
from datetime import datetime, timedelta
from typing import List
import simplejson as json
from django.conf import settings as django_settings
@ -22,7 +21,7 @@ class Secretary:
"""
def __init__(
self, outcome_checker: OutcomeChecker = None, supported_platforms: List[str] = None
self, outcome_checker: OutcomeChecker = None, supported_platforms: list[str] = None
):
self.outcome_checker = outcome_checker or OutcomeChecker()
self.supported_platforms = supported_platforms or django_settings.SUPPORTED_PLATFORMS

View file

@ -2,7 +2,6 @@ import logging
from datetime import datetime, timedelta
from json import JSONDecodeError
from logging import INFO, WARNING
from typing import List, Tuple
from django.conf import settings
from django.db.models import QuerySet
@ -35,7 +34,7 @@ class Sherlock:
backfill_tool: BackfillTool,
secretary: Secretary,
max_runtime: timedelta = None,
supported_platforms: List[str] = None,
supported_platforms: list[str] = None,
):
self.report_maintainer = report_maintainer
self.backfill_tool = backfill_tool
@ -45,7 +44,7 @@ class Sherlock:
self.supported_platforms = supported_platforms or settings.SUPPORTED_PLATFORMS
self._wake_up_time = datetime.now()
def sheriff(self, since: datetime, frameworks: List[str], repositories: List[str]):
def sheriff(self, since: datetime, frameworks: list[str], repositories: list[str]):
logger.info("Sherlock: Validating settings...")
self.secretary.validate_settings()
@ -76,15 +75,15 @@ class Sherlock:
raise MaxRuntimeExceeded("Sherlock: Max runtime exceeded.")
def _report(
self, since: datetime, frameworks: List[str], repositories: List[str]
) -> List[BackfillReport]:
self, since: datetime, frameworks: list[str], repositories: list[str]
) -> list[BackfillReport]:
return self.report_maintainer.provide_updated_reports(since, frameworks, repositories)
def _backfill(self, frameworks: List[str], repositories: List[str]):
def _backfill(self, frameworks: list[str], repositories: list[str]):
for platform in self.supported_platforms:
self.__backfill_on(platform, frameworks, repositories)
def __backfill_on(self, platform: str, frameworks: List[str], repositories: List[str]):
def __backfill_on(self, platform: str, frameworks: list[str], repositories: list[str]):
left = self.secretary.backfills_left(on_platform=platform)
total_consumed = 0
@ -110,7 +109,7 @@ class Sherlock:
@staticmethod
def __fetch_records_requiring_backfills_on(
platform: str, frameworks: List[str], repositories: List[str]
platform: str, frameworks: list[str], repositories: list[str]
) -> QuerySet:
records_to_backfill = BackfillRecord.objects.select_related(
"alert",
@ -126,7 +125,7 @@ class Sherlock:
)
return records_to_backfill
def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]:
def _backfill_record(self, record: BackfillRecord, left: int) -> tuple[int, int]:
consumed = 0
try:
@ -160,7 +159,7 @@ class Sherlock:
@staticmethod
def _note_backfill_outcome(
record: BackfillRecord, to_backfill: int, actually_backfilled: int
) -> Tuple[bool, str]:
) -> tuple[bool, str]:
success = False
record.total_actions_triggered = actually_backfilled
@ -200,7 +199,7 @@ class Sherlock:
return pending_tasks_count > acceptable_limit
@staticmethod
def __get_data_points_to_backfill(context: List[dict]) -> List[dict]:
def __get_data_points_to_backfill(context: list[dict]) -> list[dict]:
context_len = len(context)
start = None

View file

@ -11,7 +11,7 @@ from dataclasses import dataclass, asdict
from abc import ABC, abstractmethod
import urllib.parse
from typing import List, Union, Optional
from typing import Union, Optional
from django.conf import settings
from treeherder.perf.models import (
@ -40,7 +40,7 @@ class EmailWriter(ABC):
def __init__(self):
self._email = Email()
def prepare_new_email(self, must_mention: Union[List[object], object]) -> dict:
def prepare_new_email(self, must_mention: Union[list[object], object]) -> dict:
"""
Template method
"""
@ -64,12 +64,12 @@ class EmailWriter(ABC):
pass # pragma: no cover
@abstractmethod
def _write_content(self, must_mention: List[object]):
def _write_content(self, must_mention: list[object]):
pass # pragma: no cover
@staticmethod
def __ensure_its_list(must_mention) -> List[object]:
if not isinstance(must_mention, List):
def __ensure_its_list(must_mention) -> list[object]:
if not isinstance(must_mention, list):
must_mention = [must_mention]
return must_mention
@ -90,7 +90,7 @@ class BackfillReportContent:
def __init__(self):
self._raw_content = None
def include_records(self, records: List[BackfillRecord]):
def include_records(self, records: list[BackfillRecord]):
self._initialize_report_intro()
for record in records:
@ -216,7 +216,7 @@ class BackfillNotificationWriter(EmailWriter):
def _write_subject(self):
self._email.subject = "Automatic Backfilling Report"
def _write_content(self, must_mention: List[BackfillRecord]):
def _write_content(self, must_mention: list[BackfillRecord]):
content = BackfillReportContent()
content.include_records(must_mention)
@ -238,7 +238,7 @@ class DeletionReportContent:
def __init__(self):
self._raw_content = None
def include_signatures(self, signatures: List[PerformanceSignature]):
def include_signatures(self, signatures: list[PerformanceSignature]):
self._initialize_report_intro()
for signature in signatures:
@ -287,7 +287,7 @@ class DeletionNotificationWriter(EmailWriter):
def _write_subject(self):
self._email.subject = "Summary of deleted Performance Signatures"
def _write_content(self, must_mention: List[PerformanceSignature]):
def _write_content(self, must_mention: list[PerformanceSignature]):
content = DeletionReportContent()
content.include_signatures(must_mention)

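One hunk above also changes a runtime check, isinstance(must_mention, List) becoming isinstance(must_mention, list). As a hedged aside (not part of the diff itself): typing.List has been a deprecated alias of the builtin since Python 3.9, so checking against plain list keeps the behaviour while letting the import go. A small sketch with the helper renamed for illustration:

    def ensure_its_list(must_mention) -> list[object]:
        # Wrap a single object so callers can always iterate over the result
        if not isinstance(must_mention, list):
            must_mention = [must_mention]
        return must_mention

    ensure_its_list("perf-sheriff")   # -> ["perf-sheriff"]
    ensure_its_list(["a", "b"])       # -> ["a", "b"], unchanged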
View file

@ -1,7 +1,6 @@
import time
from datetime import timedelta
from typing import List
from treeherder.config import settings
from treeherder.perf.sheriffing_criteria import (
@ -15,7 +14,7 @@ from mo_times import Duration
from django.core.management.base import BaseCommand
def pretty_enumerated(formulas: List[str]) -> str:
def pretty_enumerated(formulas: list[str]) -> str:
comma = ", "
return " & ".join(comma.join(formulas).rsplit(comma, maxsplit=1))

Просмотреть файл

@ -50,14 +50,14 @@ def progress_notifier(
tabs_no=0,
):
total_items = len(iterable)
print("{0}Fetching {1} {2} item(s)...".format("\t" * tabs_no, total_items, item_name))
print("{}Fetching {} {} item(s)...".format("\t" * tabs_no, total_items, item_name))
prev_percentage = None
for idx, item in enumerate(iterable):
item_processor(item)
percentage = int((idx + 1) * 100 / total_items)
if percentage % 10 == 0 and percentage != prev_percentage:
print("{0}Fetched {1}% of {2} item(s)".format("\t" * tabs_no, percentage, item_name))
print("{}Fetched {}% of {} item(s)".format("\t" * tabs_no, percentage, item_name))
prev_percentage = percentage
@ -86,14 +86,14 @@ class Data:
def show_progress(self, queryset, map, table_name):
total_rows = int(queryset.count())
print("Fetching {0} {1}(s)...".format(total_rows, table_name))
print(f"Fetching {total_rows} {table_name}(s)...")
prev_percentage = None
for idx, obj in enumerate(list(queryset)):
map(obj)
percentage = int((idx + 1) * 100 / total_rows)
if percentage % 10 == 0 and percentage != prev_percentage:
print("Fetched {0}% of alert summaries".format(percentage))
print(f"Fetched {percentage}% of alert summaries")
prev_percentage = percentage
@ -112,19 +112,19 @@ class DecentSizedData(Data):
def delete_local_data(self):
for model in self.DECENT_SIZED_TABLES:
print("Removing elements from {0} table... ".format(model._meta.db_table))
print(f"Removing elements from {model._meta.db_table} table... ")
model.objects.using(self.target).all().delete()
def save_local_data(self):
for model in self.DECENT_SIZED_TABLES:
print("Fetching from {0} table...".format(model._meta.db_table))
print(f"Fetching from {model._meta.db_table} table...")
model.objects.using(self.target).bulk_create(model.objects.using(self.source).all())
def fillup_target(self, **filters):
print("Fetching all affordable data...\n")
# TODO: JSON dump the list
print(
"From tables {0}".format(
"From tables {}".format(
", ".join([model._meta.db_table for model in self.DECENT_SIZED_TABLES])
)
)
@ -224,7 +224,7 @@ class MassiveData(Data):
def delete_local_data(self):
for model in self.BIG_SIZED_TABLES:
print("Removing elements from {0} table... ".format(model._meta.db_table))
print(f"Removing elements from {model._meta.db_table} table... ")
model.objects.using(self.target).all().delete()
def save_local_data(self):
@ -233,7 +233,7 @@ class MassiveData(Data):
)
for table_name, properties in priority_dict.items():
print("Saving {0} data...".format(table_name))
print(f"Saving {table_name} data...")
model_values = (
properties["model"]
.objects.using(self.source)
@ -257,7 +257,7 @@ class MassiveData(Data):
# fetch all alert summaries & alerts
# with only a subset of the datum & jobs
oldest_day = datetime.datetime.now() - self.time_window
print("\nFetching data subset no older than {0}...".format(str(oldest_day)))
print(f"\nFetching data subset no older than {str(oldest_day)}...")
self.delete_local_data()
alert_summaries = list(self.query_set)
@ -293,7 +293,7 @@ class MassiveData(Data):
self.save_local_data()
def db_worker(self, process_no, alert_summaries):
print("Process no {0} up and running...".format(process_no))
print(f"Process no {process_no} up and running...")
self.progress_notifier(self.bring_in_alert_summary, alert_summaries, "alert summary", 1)
def bring_in_alert_summary(self, alert_summary):
@ -314,7 +314,7 @@ class MassiveData(Data):
if alert.id in self.models_instances["performance_alert"]:
return
print("{0}Fetching alert #{1}...".format("\t" * 2, alert.id))
print("{}Fetching alert #{}...".format("\t" * 2, alert.id))
if alert.related_summary:
if alert.related_summary not in self.models_instances["performance_alert_summary"]:
# if the alert summary identified isn't registered yet
@ -365,7 +365,7 @@ class MassiveData(Data):
if job.id in self.models_instances["job"]:
return
occasional_log("{0}Fetching job #{1}".format("\t" * 4, job.id))
occasional_log("{}Fetching job #{}".format("\t" * 4, job.id))
self.update_list("reference_data_signature", job.signature)
self.update_list("build_platform", job.build_platform)

View file

@ -1,6 +1,5 @@
import logging
from datetime import datetime, timedelta
from typing import List, Tuple
from django.core.management.base import BaseCommand
@ -65,7 +64,7 @@ class Command(BaseCommand):
logging.info("Sherlock: Going back to sleep.")
def _parse_args(self, **options) -> Tuple[List, List, datetime, timedelta]:
def _parse_args(self, **options) -> tuple[list, list, datetime, timedelta]:
return (
options["frameworks"],
options["repositories"],

Просмотреть файл

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-08 13:19
import django.core.validators
import django.db.models.deletion

Просмотреть файл

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-08 14:53
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-19 09:25
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 11:40
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-23 08:07
from django.db import migrations, models
import django.db.models.deletion

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-28 11:41
from django.db import migrations

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 08:20
from django.db import migrations, models

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-20 15:02
from django.db import migrations

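Each migration hunk above removes the same header line. As a hedged note: Python 3 source files are UTF-8 by default (PEP 3120), so the explicit encoding declaration is redundant and pyupgrade (UP009) deletes it; the rest of the file is untouched. Illustrative sketch reusing the first migration shown:

    # -*- coding: utf-8 -*-        <- removed by UP009: UTF-8 is already the default encoding
    # Generated by Django 1.11.11 on 2018-03-08 13:19
    from django.db import migrations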
View file

@ -1,7 +1,7 @@
import logging
from datetime import datetime
import json
from typing import List, Tuple, Optional
from typing import Optional
from functools import reduce
from django.contrib.auth.models import User
@ -35,7 +35,7 @@ class PerformanceFramework(models.Model):
db_table = "performance_framework"
@classmethod
def fetch_all_names(cls) -> List[str]:
def fetch_all_names(cls) -> list[str]:
return cls.objects.values_list("name", flat=True)
def __str__(self):
@ -183,11 +183,11 @@ class PerformanceSignature(models.Model):
def __str__(self):
name = self.suite
if self.test:
name += " {}".format(self.test)
name += f" {self.test}"
else:
name += " summary"
return "{} {} {} {}".format(self.signature_hash, name, self.platform, self.last_updated)
return f"{self.signature_hash} {name} {self.platform} {self.last_updated}"
class PerformanceDatum(models.Model):
@ -224,7 +224,7 @@ class PerformanceDatum(models.Model):
self.signature.save()
def __str__(self):
return "{} {}".format(self.value, self.push_timestamp)
return f"{self.value} {self.push_timestamp}"
class PerformanceDatumReplicate(models.Model):
@ -254,7 +254,7 @@ class IssueTracker(models.Model):
db_table = "issue_tracker"
def __str__(self):
return "{} (tasks via {})".format(self.name, self.task_base_url)
return f"{self.name} (tasks via {self.task_base_url})"
class PerformanceAlertSummary(models.Model):
@ -317,7 +317,7 @@ class PerformanceAlertSummary(models.Model):
issue_tracker = models.ForeignKey(IssueTracker, on_delete=models.PROTECT, default=1) # Bugzilla
def __init__(self, *args, **kwargs):
super(PerformanceAlertSummary, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
# allows updating timestamps only on new values
self.__prev_bug_number = self.bug_number
@ -333,7 +333,7 @@ class PerformanceAlertSummary(models.Model):
self.triage_due_date = triage_due
if self.bug_due_date != bug_due:
self.bug_due_date = bug_due
super(PerformanceAlertSummary, self).save(*args, **kwargs)
super().save(*args, **kwargs)
self.__prev_bug_number = self.bug_number
def update_status(self, using=None):
@ -418,9 +418,7 @@ class PerformanceAlertSummary(models.Model):
unique_together = ("repository", "framework", "prev_push", "push")
def __str__(self):
return "{} {} {}-{}".format(
self.framework, self.repository, self.prev_push.revision, self.push.revision
)
return f"{self.framework} {self.repository} {self.prev_push.revision}-{self.push.revision}"
class PerformanceAlert(models.Model):
@ -582,7 +580,7 @@ class PerformanceAlert(models.Model):
unique_together = ("summary", "series_signature")
def __str__(self):
return "{} {} {}%".format(self.summary, self.series_signature, self.amount_pct)
return f"{self.summary} {self.series_signature} {self.amount_pct}%"
class PerformanceTag(models.Model):
@ -615,7 +613,7 @@ class PerformanceBugTemplate(models.Model):
db_table = "performance_bug_template"
def __str__(self):
return "{} bug template".format(self.framework.name)
return f"{self.framework.name} bug template"
# TODO: we actually need this name for Sherlock's hourly report
@ -649,9 +647,7 @@ class BackfillReport(models.Model):
db_table = "backfill_report"
def __str__(self):
return "BackfillReport(summary #{}, last update {})".format(
self.summary.id, self.last_updated
)
return f"BackfillReport(summary #{self.summary.id}, last update {self.last_updated})"
class BackfillRecord(models.Model):
@ -750,7 +746,7 @@ class BackfillRecord(models.Model):
self.job_platform_option = job.get_platform_option()
self.save()
def get_context_border_info(self, context_property: str) -> Tuple[str, str]:
def get_context_border_info(self, context_property: str) -> tuple[str, str]:
"""
Provides border(first and last) information from context based on the property
"""
@ -760,7 +756,7 @@ class BackfillRecord(models.Model):
return from_info, to_info
def get_pushes_in_context_range(self) -> List[Push]:
def get_pushes_in_context_range(self) -> list[Push]:
from_time, to_time = self.get_context_border_info("push_timestamp")
return Push.objects.filter(
@ -779,10 +775,10 @@ class BackfillRecord(models.Model):
return ",".join(search_terms)
def get_context(self) -> List[dict]:
def get_context(self) -> list[dict]:
return json.loads(self.context)
def set_context(self, value: List[dict]):
def set_context(self, value: list[dict]):
self.context = json.dumps(value, default=str)
def set_log_details(self, value: dict):
@ -801,7 +797,7 @@ class BackfillRecord(models.Model):
db_table = "backfill_record"
def __str__(self):
return "BackfillRecord(alert #{}, from {})".format(self.alert.id, self.report)
return f"BackfillRecord(alert #{self.alert.id}, from {self.report})"
class BackfillNotificationRecord(models.Model):

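Besides the many f-string conversions, the model hunks above collapse super(PerformanceAlertSummary, self) to the zero-argument form (UP008), which resolves the same class and instance implicitly inside a method body. A hedged sketch with invented class names, showing both rewrites together:

    class Record:
        def __init__(self, value):
            self.value = value

    class TimedRecord(Record):
        def __init__(self, value, timestamp):
            # UP008: zero-argument super() is equivalent to super(TimedRecord, self) here
            super().__init__(value)
            self.timestamp = timestamp

        def __str__(self):
            # UP032: f-string instead of "{} {}".format(self.value, self.timestamp)
            return f"{self.value} {self.timestamp}"

    str(TimedRecord(42, "2024-02-02"))  # -> "42 2024-02-02"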
View file

@ -1,7 +1,6 @@
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import timedelta, datetime
from typing import Tuple, List
import requests
from django.conf import settings
@ -32,7 +31,7 @@ class NonBlockableSession(Session):
# IP when making many queries with this
self.headers = {
"Referer": f"{referer}",
"User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
"User-Agent": f"treeherder/{settings.SITE_HOSTNAME}",
"Accept": "application/json",
}
@ -91,7 +90,7 @@ class BugzillaFormula(ABC):
return result
def breakdown(self) -> Tuple[list, list]:
def breakdown(self) -> tuple[list, list]:
breakdown_items = (self._denominator_bugs, self._numerator_bugs)
if None in breakdown_items:
raise RuntimeError("Cannot breakdown results without running calculus first")
@ -107,11 +106,11 @@ class BugzillaFormula(ABC):
return creation_time <= datetime.now() - self._bug_cooldown
@abstractmethod
def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _filter_numerator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
pass
@abstractmethod
def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
pass
def _create_default_session(self) -> NonBlockableSession:
@ -120,12 +119,12 @@ class BugzillaFormula(ABC):
"""
return NonBlockableSession()
def __fetch_cooled_down_bugs(self, framework: str, suite: str, test: str = None) -> List[dict]:
def __fetch_cooled_down_bugs(self, framework: str, suite: str, test: str = None) -> list[dict]:
quantified_bugs = self.__fetch_quantified_bugs(framework, suite, test)
cooled_bugs = self.__filter_cooled_down_bugs(quantified_bugs)
return cooled_bugs
def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None) -> List[dict]:
def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None) -> list[dict]:
test_moniker = " ".join(filter(None, (suite, test)))
test_id_fragments = filter(None, [framework, test_moniker])
creation_time = datetime.strftime(self.oldest_timestamp, BZ_DATETIME_FORMAT)
@ -153,7 +152,7 @@ class BugzillaFormula(ABC):
else:
return bugs_resp.json()["bugs"]
def __filter_cooled_down_bugs(self, bugs: List[dict]) -> List[dict]:
def __filter_cooled_down_bugs(self, bugs: list[dict]) -> list[dict]:
return [bug for bug in bugs if self.has_cooled_down(bug)]
def __reset_breakdown(self):
@ -165,7 +164,7 @@ class BugzillaFormula(ABC):
class EngineerTractionFormula(BugzillaFormula):
def _filter_numerator_bugs(self, cooled_bugs: List[dict]) -> List[dict]:
def _filter_numerator_bugs(self, cooled_bugs: list[dict]) -> list[dict]:
tracted_bugs = []
for bug in cooled_bugs:
bug_history = self._fetch_history(bug["id"])
@ -177,7 +176,7 @@ class EngineerTractionFormula(BugzillaFormula):
return tracted_bugs
def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
return all_filed_bugs
def _fetch_history(self, bug_id: int) -> list:
@ -193,7 +192,7 @@ class EngineerTractionFormula(BugzillaFormula):
body = history_resp.json()
return body["bugs"][0]["history"]
def _notice_any_status_change_in(self, bug_history: List[dict], up_to: datetime) -> bool:
def _notice_any_status_change_in(self, bug_history: list[dict], up_to: datetime) -> bool:
def during_interval(change: dict) -> bool:
when = datetime.strptime(change["when"], BZ_DATETIME_FORMAT)
return when <= up_to
@ -213,7 +212,7 @@ class EngineerTractionFormula(BugzillaFormula):
class FixRatioFormula(BugzillaFormula):
def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _filter_numerator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
# select only RESOLVED - FIXED bugs
return [
bug
@ -221,7 +220,7 @@ class FixRatioFormula(BugzillaFormula):
if bug.get("status") == "RESOLVED" and bug.get("resolution") == "FIXED"
]
def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
# select RESOLVED bugs, no matter what resolution they have
return [bug for bug in all_filed_bugs if bug.get("status") == "RESOLVED"]

View file

@ -4,7 +4,7 @@ import logging
from multiprocessing import cpu_count
from multiprocessing.pool import Pool, ThreadPool, AsyncResult
import time
from typing import Tuple, Dict, Union, List
from typing import Union
from datetime import datetime, timedelta
@ -49,7 +49,7 @@ class CriteriaRecord:
class RecordComputer:
def __init__(
self,
formula_map: Dict[str, BugzillaFormula],
formula_map: dict[str, BugzillaFormula],
time_until_expires: timedelta,
webservice_rest_time: timedelta,
logger=None,
@ -162,7 +162,7 @@ class ResultsChecker:
self.__last_change = 0
self.__since_last_change = timedelta(seconds=0)
def wait_for_results(self, results: List[AsyncResult]):
def wait_for_results(self, results: list[AsyncResult]):
self.__reset_change_track()
while True:
@ -180,7 +180,7 @@ class ResultsChecker:
f"Haven't computed updates for all records yet (only {len(ready)} out of {len(results)}). Still waiting..."
)
def __updates_stagnated(self, results: List[AsyncResult], last_check_on: float) -> bool:
def __updates_stagnated(self, results: list[AsyncResult], last_check_on: float) -> bool:
ready_amount = len([r for r in results if r.ready()])
total_results = len(results)
new_change = total_results - ready_amount
@ -213,7 +213,7 @@ class CriteriaTracker:
def __init__(
self,
formula_map: Dict[str, BugzillaFormula] = None,
formula_map: dict[str, BugzillaFormula] = None,
record_path: str = None,
webservice_rest_time: timedelta = None,
multiprocessed: bool = False,
@ -236,7 +236,7 @@ class CriteriaTracker:
if not callable(formula):
raise TypeError("Must provide callable as sheriffing criteria formula")
def get_test_moniker(self, record: CriteriaRecord) -> Tuple[str, str, str]:
def get_test_moniker(self, record: CriteriaRecord) -> tuple[str, str, str]:
return record.Framework, record.Suite, record.Test
def __iter__(self):
@ -247,7 +247,7 @@ class CriteriaTracker:
self.log.info(f"Loading records from {self._record_path}...")
self._records_map = {} # reset them
with open(self._record_path, "r") as csv_file:
with open(self._record_path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
test_moniker = row.get("Framework"), row.get("Suite"), row.get("Test")
@ -283,7 +283,7 @@ class CriteriaTracker:
record = self._computer.apply_formulas(record)
return record
def create_formula_map(self) -> Dict[str, BugzillaFormula]:
def create_formula_map(self) -> dict[str, BugzillaFormula]:
return {
self.ENGINEER_TRACTION: EngineerTractionFormula(),
self.FIX_RATIO: FixRatioFormula(),

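One hunk above also trims the redundant "r" argument from open() (UP015), since read-only text mode is the default. A self-contained, hedged sketch; the CSV name and contents are made up:

    from pathlib import Path

    Path("criteria.csv").write_text("Framework,Suite,Test\n")

    # open(path) and open(path, "r") are equivalent: read-only text mode is the default
    with open("criteria.csv") as csv_file:
        header = csv_file.readline()  # "Framework,Suite,Test\n"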
View file

@ -107,7 +107,7 @@ class RevisionDatum:
def __repr__(self):
values_str = "[ %s ]" % ", ".join(["%.3f" % value for value in self.values])
return "<%s: %s, %s, %.3f, %s>" % (
return "<{}: {}, {}, {:.3f}, {}>".format(
self.push_timestamp,
self.push_id,
values_str,

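Here the auto fix turns printf-style % formatting into str.format() rather than an f-string, presumably because the argument tuple continues over the following lines. A simplified, hedged sketch of the same rewrite (UP031) with invented values:

    push_id, values_str = 12345, "[ 1.000, 2.500 ]"

    # Before: "<%s: %s>" % (push_id, values_str)
    # After the auto fix:
    "<{}: {}>".format(push_id, values_str)  # -> "<12345: [ 1.000, 2.500 ]>"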
View file

@ -103,12 +103,10 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
job_symbol = job.job_type.symbol
job_group = job.job_group.name
job_group_symbol = job.job_group.symbol
job.job_key = "{}{}{}{}".format(config, platform, job_name, job_group)
job.job_key = f"{config}{platform}{job_name}{job_group}"
all_failed_jobs[job.id] = job
# The 't' ensures the key starts with a character, as required for a query selector
test_key = re.sub(
r"\W+", "", "t{}{}{}{}{}".format(test_name, config, platform, job_name, job_group)
)
test_key = re.sub(r"\W+", "", f"t{test_name}{config}{platform}{job_name}{job_group}")
isClassifiedIntermittent = any(
job["failure_classification_id"] == 4 for job in jobs[job_name]
)
@ -215,7 +213,7 @@ def get_test_failures(
jobs,
result_status=set(),
):
logger.debug("Getting test failures for push: {}".format(push.id))
logger.debug(f"Getting test failures for push: {push.id}")
# query for jobs for the last two weeks excluding today
# find tests that have failed in the last 14 days
# this is very cache-able for reuse on other pushes.

View file

@ -37,7 +37,7 @@ def get_usage():
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
"try", "treeherder-prod"
)
new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
new_relic_url = f"{settings.NEW_RELIC_INSIGHTS_API_URL}?nrql={nrql}"
headers = {
"Accept": "application/json",
"Content-Type": "application/json",

View file

@ -31,7 +31,7 @@ def clean_test(test, signature, message):
elif clean_name.startswith("http://10.0"):
left = "/tests/".join(left.split("/tests/")[1:])
right = "/tests/".join(right.split("/tests/")[1:])
clean_name = "%s%s%s" % (left, splitter, right)
clean_name = f"{left}{splitter}{right}"
if "test_end for" in clean_name:
clean_name = clean_name.split()[2]

View file

@ -59,7 +59,7 @@ class PulseConsumer(ConsumerMixin):
self.connection = Connection(source["pulse_url"], virtual_host=source.get("vhost", "/"))
self.consumers = []
self.queue = None
self.queue_name = "queue/{}/{}".format(self.connection.userid, self.queue_suffix)
self.queue_name = f"queue/{self.connection.userid}/{self.queue_suffix}"
self.root_url = source["root_url"]
self.source = source
self.build_routing_key = build_routing_key
@ -110,7 +110,7 @@ class PulseConsumer(ConsumerMixin):
# get the binding key for this consumer
binding = self.get_binding_str(exchange.name, routing_key)
logger.info("Pulse queue {} bound to: {}".format(self.queue_name, binding))
logger.info(f"Pulse queue {self.queue_name} bound to: {binding}")
return binding
@ -146,11 +146,11 @@ class PulseConsumer(ConsumerMixin):
def get_binding_str(self, exchange, routing_key):
"""Use consistent string format for binding comparisons"""
return "{} {}".format(exchange, routing_key)
return f"{exchange} {routing_key}"
def get_bindings(self, queue_name):
"""Get list of bindings from the pulse API"""
return fetch_json("{}queue/{}/bindings".format(PULSE_GUARDIAN_URL, queue_name))
return fetch_json(f"{PULSE_GUARDIAN_URL}queue/{queue_name}/bindings")
class TaskConsumer(PulseConsumer):
@ -227,7 +227,7 @@ class JointConsumer(PulseConsumer):
thread, so we use multiple threads, one per consumer.
"""
queue_suffix = env("PULSE_QUEUE_NAME", default="queue_{}".format(socket.gethostname()))
queue_suffix = env("PULSE_QUEUE_NAME", default=f"queue_{socket.gethostname()}")
def bindings(self):
rv = []

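The consumer hunks above fold a function call directly into the f-string. As a hedged aside, any expression may appear inside the braces, so the .format() indirection adds nothing:

    import socket

    # Equivalent to "queue_{}".format(socket.gethostname())
    queue_suffix = f"queue_{socket.gethostname()}"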
View file

@ -1,7 +1,6 @@
import logging
import uuid
from abc import ABC, abstractmethod
from typing import List, Tuple
import requests
import jsone
@ -169,7 +168,7 @@ class TaskclusterModelImpl(TaskclusterModel):
)
@classmethod
def _task_in_context(cls, context: List[dict], task_tags: dict) -> bool:
def _task_in_context(cls, context: list[dict], task_tags: dict) -> bool:
"""
A task (as defined by its tags) is said to match a tag-set if its
tags are a super-set of the tag-set. A tag-set is a set of key-value pairs.
@ -254,7 +253,7 @@ def notify_client_factory(
return NotifyNullObject()
def autofind_unprovided(access_token, client_id) -> Tuple[str, str]:
def autofind_unprovided(access_token, client_id) -> tuple[str, str]:
client_id = client_id or settings.NOTIFY_CLIENT_ID
access_token = access_token or settings.NOTIFY_ACCESS_TOKEN
return client_id, access_token

View file

@ -4,31 +4,31 @@ from treeherder.utils.http import fetch_json
def fetch_api(path, params=None):
if GITHUB_TOKEN:
headers = {"Authorization": "token {}".format(GITHUB_TOKEN)}
headers = {"Authorization": f"token {GITHUB_TOKEN}"}
else:
headers = {}
return fetch_json("https://api.github.com/{}".format(path), params, headers)
return fetch_json(f"https://api.github.com/{path}", params, headers)
def get_releases(owner, repo, params=None):
return fetch_api("repos/{}/{}/releases".format(owner, repo), params)
return fetch_api(f"repos/{owner}/{repo}/releases", params)
def get_repo(owner, repo, params=None):
return fetch_api("repos/{}/{}".format(owner, repo), params)
return fetch_api(f"repos/{owner}/{repo}", params)
def compare_shas(owner, repo, base, head):
return fetch_api("repos/{}/{}/compare/{}...{}".format(owner, repo, base, head))
return fetch_api(f"repos/{owner}/{repo}/compare/{base}...{head}")
def get_all_commits(owner, repo, params=None):
return fetch_api("repos/{}/{}/commits".format(owner, repo), params)
return fetch_api(f"repos/{owner}/{repo}/commits", params)
def get_commit(owner, repo, sha, params=None):
return fetch_api("repos/{}/{}/commits/{}".format(owner, repo, sha), params)
return fetch_api(f"repos/{owner}/{repo}/commits/{sha}", params)
def get_pull_request(owner, repo, sha, params=None):
return fetch_api("repos/{}/{}/pulls/{}/commits".format(owner, repo, sha), params)
return fetch_api(f"repos/{owner}/{repo}/pulls/{sha}/commits", params)

View file

@ -6,7 +6,7 @@ from django.conf import settings
def make_request(url, method="GET", headers=None, timeout=30, **kwargs):
"""A wrapper around requests to set defaults & call raise_for_status()."""
headers = headers or {}
headers["User-Agent"] = "treeherder/{}".format(settings.SITE_HOSTNAME)
headers["User-Agent"] = f"treeherder/{settings.SITE_HOSTNAME}"
response = requests.request(method, url, headers=headers, timeout=timeout, **kwargs)
if response.history:
params = {

View file

@ -5,7 +5,7 @@ from treeherder.utils.http import fetch_json, fetch_text, make_request
def get_task_definition(root_url, task_id):
task_url = taskcluster_urls.api(root_url, "queue", "v1", "task/{}".format(task_id))
task_url = taskcluster_urls.api(root_url, "queue", "v1", f"task/{task_id}")
return fetch_json(task_url)
@ -16,9 +16,7 @@ def download_artifact(root_url, task_id, path):
Returns either the parsed json, the parsed yaml or the plain response.
"""
artifact_url = taskcluster_urls.api(
root_url, "queue", "v1", "task/{}/artifacts/{}".format(task_id, path)
)
artifact_url = taskcluster_urls.api(root_url, "queue", "v1", f"task/{task_id}/artifacts/{path}")
if path.endswith(".json"):
return fetch_json(artifact_url)

View file

@ -1,5 +1,3 @@
# coding: utf-8
import requests
from django.conf import settings
from rest_framework import viewsets

View file

@ -37,6 +37,6 @@ class InfraCompareQuerySerializers(serializers.Serializer):
Repository.objects.get(name=project)
except ObjectDoesNotExist:
raise serializers.ValidationError("{} does not exist.".format(project))
raise serializers.ValidationError(f"{project} does not exist.")
return project

View file

@ -26,14 +26,10 @@ class InvestigatedViewSet(viewsets.ModelViewSet):
return queryset
except Push.DoesNotExist:
return Response(
"No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
)
return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
except InvestigatedTests.DoesNotExist:
return Response(
"No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
)
return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
def create(self, request, *args, **kwargs):
project = kwargs["project"]
@ -52,19 +48,13 @@ class InvestigatedViewSet(viewsets.ModelViewSet):
return Response(serializer.data, status=status.HTTP_201_CREATED)
except IntegrityError:
return Response(
"{0} already marked investigated".format(test), status=HTTP_400_BAD_REQUEST
)
return Response(f"{test} already marked investigated", status=HTTP_400_BAD_REQUEST)
except Push.DoesNotExist:
return Response(
"No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
)
return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
except JobType.DoesNotExist:
return Response(
"No JobType with job name: {0}".format(jobName), status=HTTP_404_NOT_FOUND
)
return Response(f"No JobType with job name: {jobName}", status=HTTP_404_NOT_FOUND)
def destroy(self, request, project, pk=None):
try:

View file

@ -279,7 +279,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
repository__name=project, id=pk
)
except Job.DoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
resp = serializers.JobProjectSerializer(job, read_only=True).data
@ -333,7 +333,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
parser.parse(param_value)
except ValueError:
return Response(
"Invalid date value for `last_modified`: {}".format(param_value),
f"Invalid date value for `last_modified`: {param_value}",
status=HTTP_400_BAD_REQUEST,
)
filter_params[param_key] = param_value
@ -349,14 +349,14 @@ class JobsProjectViewSet(viewsets.ViewSet):
return_type = filter_params.get("return_type", "dict").lower()
if count > MAX_JOBS_COUNT:
msg = "Specified count exceeds API MAX_JOBS_COUNT value: {}".format(MAX_JOBS_COUNT)
msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {MAX_JOBS_COUNT}"
return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response(
{"detail": "No project with name {}".format(project)}, status=HTTP_404_NOT_FOUND
{"detail": f"No project with name {project}"}, status=HTTP_404_NOT_FOUND
)
jobs = JobFilter(
{k: v for (k, v) in filter_params.items()},
@ -379,7 +379,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
textlog_steps = (
TextLogStep.objects.filter(job=job)
@ -398,7 +398,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
try:
job = Job.objects.get(repository__name=project, id=pk)
except Job.DoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
textlog_errors = (
TextLogError.objects.filter(job=job)
.select_related("_metadata", "_metadata__failure_line")
@ -417,7 +417,7 @@ class JobsProjectViewSet(viewsets.ViewSet):
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
return Response(get_error_summary(job))
@ -430,13 +430,13 @@ class JobsProjectViewSet(viewsets.ViewSet):
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response(
{"detail": "No project with name {}".format(project)}, status=HTTP_404_NOT_FOUND
{"detail": f"No project with name {project}"}, status=HTTP_404_NOT_FOUND
)
try:
job = Job.objects.get(repository=repository, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
filter_params = request.query_params.copy()

View file

@ -34,7 +34,7 @@ class NoteViewSet(viewsets.ViewSet):
serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
return Response(serializer.data)
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No note with id: {pk}", status=HTTP_404_NOT_FOUND)
def list(self, request, project):
"""
@ -116,7 +116,7 @@ class NoteViewSet(viewsets.ViewSet):
exc_info=True,
)
return Response({"message": "note stored for job {0}".format(request.data["job_id"])})
return Response({"message": "note stored for job {}".format(request.data["job_id"])})
def destroy(self, request, project, pk=None):
"""
@ -127,4 +127,4 @@ class NoteViewSet(viewsets.ViewSet):
note.delete()
return Response({"message": "Note deleted"})
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(f"No note with id: {pk}", status=HTTP_404_NOT_FOUND)

View file

@ -30,16 +30,16 @@ PERFHERDER_TIMERANGES = [
def get_test_suite(suite, test):
return suite if test == "" or test == suite else "{} {}".format(suite, test)
return suite if test == "" or test == suite else f"{suite} {test}"
def get_header_name(extra_options, option_name, test_suite):
name = "{} {} {}".format(test_suite, option_name, extra_options)
name = f"{test_suite} {option_name} {extra_options}"
return name
def get_sig_identifier(header, platform):
return "{} {}".format(header, platform)
return f"{header} {platform}"
def get_option_collection_map():

View file

@ -1,7 +1,6 @@
import datetime
import time
from collections import defaultdict
from typing import List
from urllib.parse import urlencode
import django_filters
@ -819,7 +818,7 @@ class PerformanceSummary(generics.ListAPIView):
return Response(data=serialized_data)
@staticmethod
def _filter_out_retriggers(serialized_data: List[dict]) -> List[dict]:
def _filter_out_retriggers(serialized_data):
"""
Removes data points resulted from retriggers
"""
@ -889,7 +888,7 @@ class PerfCompareResults(generics.ListAPIView):
new_push = models.Push.objects.get(revision=new_rev, repository__name=new_repo_name)
except models.Push.DoesNotExist:
return Response(
"No new push with revision {} from repo {}.".format(new_rev, new_repo_name),
f"No new push with revision {new_rev} from repo {new_repo_name}.",
status=HTTP_400_BAD_REQUEST,
)
@ -910,7 +909,7 @@ class PerfCompareResults(generics.ListAPIView):
end_day = new_push.time
except models.Push.DoesNotExist:
return Response(
"No base push with revision {} from repo {}.".format(base_rev, base_repo_name),
f"No base push with revision {base_rev} from repo {base_repo_name}.",
status=HTTP_400_BAD_REQUEST,
)
@ -1179,7 +1178,7 @@ class PerfCompareResults(generics.ListAPIView):
)
@staticmethod
def _get_signatures_values(signatures: List[PerformanceSignature]):
def _get_signatures_values(signatures):
return signatures.values(
"framework_id",
"id",

Some files were not shown because too many files have changed in this diff.