N802: Function name should be lowercase

Yoann Schneider 2024-02-27 19:43:01 +01:00 committed by Sebastian Hengst
Parent 547dad3e09
Commit 0d8335d790
11 changed files with 80 additions and 80 deletions

View file

@@ -39,8 +39,8 @@ select = [
 "F",
 # pyupgrade
 "UP",
-# pep-naming
-"N806", "N803", "N801", "N815", "N811", "N818", "N804", "N813", "N816"
+# pep8-naming
+"N"
 ]
 ignore = [

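The hunk above replaces a hand-picked subset of pep8-naming codes, which notably omitted N802, with the whole "N" family, so every camelCase function name in the repo now trips the linter. A minimal illustration of what N802 flags (hypothetical snippet, not from this repo):

# Before: ruff reports "N802 Function name `handleMessage` should be lowercase"
def handleMessage(message):
    return message

# After: snake_case satisfies N802
def handle_message(message):
    return message
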
View file

@@ -6,7 +6,7 @@ import responses
 import slugid
 from treeherder.etl.job_loader import JobLoader
-from treeherder.etl.taskcluster_pulse.handler import handleMessage
+from treeherder.etl.taskcluster_pulse.handler import handle_message
 from treeherder.model.models import Job, JobLog, TaskclusterMetadata
 from django.core.exceptions import ObjectDoesNotExist
@@ -62,9 +62,9 @@ async def new_pulse_jobs(sample_data, test_repository, push_stored):
 task_id = message["payload"]["status"]["taskId"]
 task = tasks[task_id]
-# If we pass task to handleMessage we won't hit the network
-task_runs = await handleMessage(message, task)
-# handleMessage returns [] when it is a task that is not meant for Treeherder
+# If we pass task to handle_message we won't hit the network
+task_runs = await handle_message(message, task)
+# handle_message returns [] when it is a task that is not meant for Treeherder
 for run in reversed(task_runs):
 mock_artifact(task_id, run["retryId"], "public/logs/live_backing.log")
 run["origin"]["project"] = test_repository.name

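As the updated comments say, passing the task definition straight to handle_message avoids the network fetch, and the coroutine returns [] for tasks that carry no Treeherder route. A hedged sketch of driving it directly; the message shape is inferred from this diff, and the exchange name and URLs are assumptions:

import asyncio

from treeherder.etl.taskcluster_pulse.handler import handle_message

message = {
    "exchange": "exchange/taskcluster-queue/v1/task-completed",  # assumed name
    "payload": {"status": {"taskId": "abc123", "runs": []}, "runId": 0},
    "root_url": "https://firefox-ci-tc.services.mozilla.com",  # assumed root URL
}
task = {"routes": [], "extra": {}}  # no tc-treeherder route

# Passing `task` skips the Queue API call; with no matching route,
# handle_message logs the PulseHandlerError and returns [].
assert asyncio.run(handle_message(message, task)) == []
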
View file

@@ -9,7 +9,7 @@ from treeherder.services.pulse.consumers import Consumers, JointConsumer, PulseC
 from .utils import create_and_destroy_exchange
-def test_Consumers():
+def test_consumers():
 class TestConsumer:
 def prepare(self):
 self.prepared = True
@@ -30,7 +30,7 @@ def test_Consumers():
 @pytest.mark.skipif(IS_WINDOWS, reason="celery does not work on windows")
-def test_PulseConsumer(pulse_connection):
+def test_pulse_consumer(pulse_connection):
 class TestConsumer(PulseConsumer):
 queue_suffix = "test"
@@ -51,7 +51,7 @@ def test_PulseConsumer(pulse_connection):
 cons.prepare()
-def test_JointConsumer_on_message_do_not_call_classification_ingestion(monkeypatch):
+def test_joint_consumer_on_message_do_not_call_classification_ingestion(monkeypatch):
 mock_called = False
 def mock_store_pulse_tasks_classification(args, queue):
@@ -87,7 +87,7 @@ def test_JointConsumer_on_message_do_not_call_classification_ingestion(monkeypat
 assert not mock_called
-def test_JointConsumer_on_message_call_classification_ingestion(monkeypatch):
+def test_joint_consumer_on_message_call_classification_ingestion(monkeypatch):
 mock_called = False
 def mock_store_pulse_tasks_classification(args, queue):

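The test bodies are truncated in this view; what is visible is a local flag plus a stub the consumer should (or should not) invoke. A minimal sketch of that pattern, assuming the stub is wired in with monkeypatch.setattr and flips the flag via nonlocal:

def test_classification_ingestion_sketch(monkeypatch):
    mock_called = False

    def mock_store_pulse_tasks_classification(args, queue):
        nonlocal mock_called  # flip the flag in the enclosing test
        mock_called = True

    # The real tests would patch the Celery task the consumer dispatches to,
    # e.g. (target path assumed):
    # monkeypatch.setattr(
    #     "treeherder.services.pulse.consumers.store_pulse_tasks_classification",
    #     mock_store_pulse_tasks_classification,
    # )
    mock_store_pulse_tasks_classification(None, None)
    assert mock_called
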
View file

@@ -1,9 +1,9 @@
 import pytest
-from treeherder.utils.taskcluster_lib_scopes import patternMatch, satisfiesExpression
+from treeherder.utils.taskcluster_lib_scopes import pattern_match, satisfies_expression
-# satisfiesExpression()
+# satisfies_expression()
 @pytest.mark.parametrize(
 "scopeset, expression",
 [
@@ -36,7 +36,7 @@ from treeherder.utils.taskcluster_lib_scopes import patternMatch, satisfiesExpre
 ],
 )
 def test_expression_is_satisfied(scopeset, expression):
-assert satisfiesExpression(scopeset, expression) is True
+assert satisfies_expression(scopeset, expression) is True
 @pytest.mark.parametrize(
@@ -58,7 +58,7 @@ def test_expression_is_satisfied(scopeset, expression):
 ],
 )
 def test_expression_is_not_satisfied(scopeset, expression):
-assert not satisfiesExpression(scopeset, expression)
+assert not satisfies_expression(scopeset, expression)
 @pytest.mark.parametrize(
@@ -72,19 +72,19 @@ def test_expression_is_not_satisfied(scopeset, expression):
 )
 def test_wrong_scopeset_type_raises_exception(scopeset):
 with pytest.raises(TypeError):
-satisfiesExpression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")
+satisfies_expression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")
-# patternMatch()
+# pattern_match()
 def test_identical_scope_and_pattern_are_matching():
-assert patternMatch("mock:scope", "mock:scope") is True
+assert pattern_match("mock:scope", "mock:scope") is True
 @pytest.mark.parametrize(
 "pattern, scope", [("matching*", "matching"), ("matching*", "matching/scope")]
 )
 def test_starred_patterns_are_matching(pattern, scope):
-assert patternMatch(pattern, scope) is True
+assert pattern_match(pattern, scope) is True
 @pytest.mark.parametrize(
@@ -92,4 +92,4 @@ def test_starred_patterns_are_matching(pattern, scope):
 [("matching*", "mismatching"), ("match*ing", "matching"), ("*matching", "matching")],
 )
 def test_starred_patterns_dont_matching(pattern, scope):
-assert not patternMatch(pattern, scope)
+assert not pattern_match(pattern, scope)

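For reference, the behaviour these tests pin down (per the implementation at the bottom of this commit): a string expression is satisfied when any scope in the set matches it, AllOf/AnyOf combinators nest, and only a trailing "*" acts as a wildcard. A usage sketch with made-up scopes:

from treeherder.utils.taskcluster_lib_scopes import pattern_match, satisfies_expression

scopeset = ["queue:create-task:*", "secrets:get:project/treeherder"]

# Any one scope in the set may satisfy a string expression.
assert satisfies_expression(scopeset, "queue:create-task:high/proj/worker")

# AllOf / AnyOf can be nested arbitrarily.
assert satisfies_expression(
    scopeset,
    {"AllOf": ["secrets:get:project/treeherder", {"AnyOf": ["queue:create-task:x"]}]},
)

# Only a trailing "*" is a wildcard; "match*ing" and "*matching" match nothing extra.
assert pattern_match("queue:create-task:*", "queue:create-task:low/x") is True
assert not pattern_match("*matching", "matching")
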
View file

@@ -92,7 +92,7 @@ def ingest_hg_push(options):
 gecko_decision_task = get_decision_task_id(project, commit, repo.tc_root_url)
 logger.info("## START ##")
 loop = asyncio.get_event_loop()
-loop.run_until_complete(processTasks(gecko_decision_task, repo.tc_root_url))
+loop.run_until_complete(process_tasks(gecko_decision_task, repo.tc_root_url))
 logger.info("## END ##")
 else:
 logger.info("You can ingest all tasks for a push with -a/--ingest-all-tasks.")
@@ -120,7 +120,7 @@ async def ingest_task(task_id, root_url):
 async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
 async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
 results = await asyncio.gather(async_queue.status(task_id), async_queue.task(task_id))
-await handleTask(
+await handle_task(
 {
 "status": results[0]["status"],
 "task": results[1],
@@ -129,7 +129,7 @@ async def ingest_task(task_id, root_url):
 )
-async def handleTask(task, root_url):
+async def handle_task(task, root_url):
 task_id = task["status"]["taskId"]
 runs = task["status"]["runs"]
 # If we iterate in order of the runs, we will not be able to mark older runs as
@@ -160,7 +160,7 @@ async def handleTask(task, root_url):
 await await_futures(job_futures)
-async def fetchGroupTasks(task_group_id, root_url):
+async def fetch_group_tasks(task_group_id, root_url):
 tasks = []
 query = {}
 continuation_token = ""
@@ -182,9 +182,9 @@ async def fetchGroupTasks(task_group_id, root_url):
 return tasks
-async def processTasks(task_group_id, root_url):
+async def process_tasks(task_group_id, root_url):
 try:
-tasks = await fetchGroupTasks(task_group_id, root_url)
+tasks = await fetch_group_tasks(task_group_id, root_url)
 logger.info("We have %s tasks to process", len(tasks))
 except Exception as e:
 logger.exception(e)
@@ -193,7 +193,7 @@ async def processTasks(task_group_id, root_url):
 return
 # Schedule and run tasks inside the thread pool executor
-task_futures = [routine_to_future(handleTask, task, root_url) for task in tasks]
+task_futures = [routine_to_future(handle_task, task, root_url) for task in tasks]
 await await_futures(task_futures)

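routine_to_future and await_futures are not shown in this diff; going by the comment, they run coroutines on a thread pool executor. A self-contained sketch of the same fan-out with plain asyncio.gather and stand-in names:

import asyncio

async def handle_task_stub(task, root_url):
    await asyncio.sleep(0)  # stand-in for the real per-task ingestion work
    return task["status"]["taskId"]

async def process_tasks_sketch(tasks, root_url):
    futures = [handle_task_stub(task, root_url) for task in tasks]
    return await asyncio.gather(*futures)

tasks = [{"status": {"taskId": f"task-{i}"}} for i in range(3)]
print(asyncio.run(process_tasks_sketch(tasks, "https://tc.example.test")))
# ['task-0', 'task-1', 'task-2']
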
View file

@@ -10,7 +10,7 @@ import taskcluster.aio
 import taskcluster_urls
 from treeherder.etl.schema import get_json_schema
-from treeherder.etl.taskcluster_pulse.parse_route import parseRoute
+from treeherder.etl.taskcluster_pulse.parse_route import parse_route
 env = environ.Env()
 logger = logging.getLogger(__name__)
@@ -33,11 +33,11 @@ class PulseHandlerError(Exception):
 pass
-def stateFromRun(job_run):
+def state_from_run(job_run):
 return "completed" if job_run["state"] in ("exception", "failed") else job_run["state"]
-def resultFromRun(job_run):
+def result_from_run(job_run):
 run_to_result = {
 "completed": "success",
 "failed": "fail",
@@ -56,7 +56,7 @@ def resultFromRun(job_run):
 # Creates a log entry for Treeherder to retrieve and parse. This log is
 # displayed on the Treeherder Log Viewer once parsed.
-def createLogReference(root_url, task_id, run_id):
+def create_log_reference(root_url, task_id, run_id):
 log_url = taskcluster_urls.api(
 root_url, "queue", "v1", "task/{taskId}/runs/{runId}/artifacts/public/logs/live_backing.log"
 ).format(taskId=task_id, runId=run_id)
@@ -70,7 +70,7 @@ def createLogReference(root_url, task_id, run_id):
 # the route is parsed into distinct parts used for constructing the
 # Treeherder job message.
 # TODO: Refactor https://bugzilla.mozilla.org/show_bug.cgi?id=1560596
-def parseRouteInfo(prefix, task_id, routes, task):
+def parse_route_info(prefix, task_id, routes, task):
 matching_routes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
 if len(matching_routes) != 1:
@@ -80,12 +80,12 @@ def parseRouteInfo(prefix, task_id, routes, task):
 + f"Task ID: {task_id} Routes: {routes}"
 )
-parsed_route = parseRoute(matching_routes[0])
+parsed_route = parse_route(matching_routes[0])
 return parsed_route
-def validateTask(task):
+def validate_task(task):
 treeherder_metadata = task.get("extra", {}).get("treeherder")
 if not treeherder_metadata:
 logger.debug("Task metadata is missing Treeherder job configuration.")
@@ -166,7 +166,7 @@ def ignore_task(task, task_id, root_url, project):
 # Only messages that contain the properly formatted routing key and contains
 # treeherder job information in task.extra.treeherder are accepted
 # This will generate a list of messages that need to be ingested by Treeherder
-async def handleMessage(message, task_definition=None):
+async def handle_message(message, task_definition=None):
 async with taskcluster.aio.createSession() as session:
 jobs = []
 task_id = message["payload"]["status"]["taskId"]
@@ -174,7 +174,7 @@ async def handleMessage(message, task_definition=None):
 task = (await async_queue.task(task_id)) if not task_definition else task_definition
 try:
-parsed_route = parseRouteInfo("tc-treeherder", task_id, task["routes"], task)
+parsed_route = parse_route_info("tc-treeherder", task_id, task["routes"], task)
 except PulseHandlerError as e:
 logger.debug("%s", str(e))
 return jobs
@@ -185,7 +185,7 @@ async def handleMessage(message, task_definition=None):
 logger.debug("Message received for task %s", task_id)
 # Validation failures are common and logged, so do nothing more.
-if not validateTask(task):
+if not validate_task(task):
 return jobs
 task_type = EXCHANGE_EVENT_MAP.get(message["exchange"])
@@ -196,18 +196,18 @@ async def handleMessage(message, task_definition=None):
 # This will only work if the previous run has not yet been processed by Treeherder
 # since _remove_existing_jobs() will prevent it
 if message["payload"]["runId"] > 0:
-jobs.append(await handleTaskRerun(parsed_route, task, message, session))
+jobs.append(await handle_task_rerun(parsed_route, task, message, session))
 if not task_type:
 raise Exception("Unknown exchange: {exchange}".format(exchange=message["exchange"]))
 elif task_type == "pending":
-jobs.append(handleTaskPending(parsed_route, task, message))
+jobs.append(handle_task_pending(parsed_route, task, message))
 elif task_type == "running":
-jobs.append(handleTaskRunning(parsed_route, task, message))
+jobs.append(handle_task_running(parsed_route, task, message))
 elif task_type in ("completed", "failed"):
-jobs.append(await handleTaskCompleted(parsed_route, task, message, session))
+jobs.append(await handle_task_completed(parsed_route, task, message, session))
 elif task_type == "exception":
-jobs.append(await handleTaskException(parsed_route, task, message, session))
+jobs.append(await handle_task_exception(parsed_route, task, message, session))
 return jobs
@@ -217,7 +217,7 @@ async def handleMessage(message, task_definition=None):
 #
 # Specific handlers for each message type will add/remove information necessary
 # for the type of task event..
-def buildMessage(push_info, task, run_id, payload):
+def build_message(push_info, task, run_id, payload):
 task_id = payload["status"]["taskId"]
 job_run = payload["status"]["runs"][run_id]
 treeherder_config = task["extra"]["treeherder"]
@@ -236,8 +236,8 @@ def buildMessage(push_info, task, run_id, payload):
 # Maximum job name length is 140 chars...
 "jobName": task["metadata"]["name"][0:139],
 },
-"state": stateFromRun(job_run),
-"result": resultFromRun(job_run),
+"state": state_from_run(job_run),
+"result": result_from_run(job_run),
 "tier": treeherder_config.get("tier", 1),
 "timeScheduled": task["created"],
 "jobKind": treeherder_config.get("jobKind", "other"),
@@ -289,50 +289,50 @@ def buildMessage(push_info, task, run_id, payload):
 return job
-def handleTaskPending(push_info, task, message):
+def handle_task_pending(push_info, task, message):
 payload = message["payload"]
-return buildMessage(push_info, task, payload["runId"], payload)
+return build_message(push_info, task, payload["runId"], payload)
-async def handleTaskRerun(push_info, task, message, session):
+async def handle_task_rerun(push_info, task, message, session):
 payload = message["payload"]
-job = buildMessage(push_info, task, payload["runId"] - 1, payload)
+job = build_message(push_info, task, payload["runId"] - 1, payload)
 job["state"] = "completed"
 job["result"] = "fail"
 job["isRetried"] = True
 # reruns often have no logs, so in the interest of not linking to a 404'ing artifact,
 # don't include a link
 job["logs"] = []
-job = await addArtifactUploadedLinks(
+job = await add_artifact_uploaded_links(
 message["root_url"], payload["status"]["taskId"], payload["runId"] - 1, job, session
 )
 return job
-def handleTaskRunning(push_info, task, message):
+def handle_task_running(push_info, task, message):
 payload = message["payload"]
-job = buildMessage(push_info, task, payload["runId"], payload)
+job = build_message(push_info, task, payload["runId"], payload)
 job["timeStarted"] = payload["status"]["runs"][payload["runId"]]["started"]
 return job
-async def handleTaskCompleted(push_info, task, message, session):
+async def handle_task_completed(push_info, task, message, session):
 payload = message["payload"]
 job_run = payload["status"]["runs"][payload["runId"]]
-job = buildMessage(push_info, task, payload["runId"], payload)
+job = build_message(push_info, task, payload["runId"], payload)
 job["timeStarted"] = job_run["started"]
 job["timeCompleted"] = job_run["resolved"]
 job["logs"] = [
-createLogReference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
+create_log_reference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
 ]
-job = await addArtifactUploadedLinks(
+job = await add_artifact_uploaded_links(
 message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
 )
 return job
-async def handleTaskException(push_info, task, message, session):
+async def handle_task_exception(push_info, task, message, session):
 payload = message["payload"]
 job_run = payload["status"]["runs"][payload["runId"]]
 # Do not report runs that were created as an exception. Such cases
@@ -340,7 +340,7 @@ async def handleTaskException(push_info, task, message, session):
 if job_run["reasonCreated"] == "exception":
 return
-job = buildMessage(push_info, task, payload["runId"], payload)
+job = build_message(push_info, task, payload["runId"], payload)
 # Jobs that get cancelled before running don't have a started time
 if job_run.get("started"):
 job["timeStarted"] = job_run["started"]
@@ -348,13 +348,13 @@ async def handleTaskException(push_info, task, message, session):
 # exceptions generally have no logs, so in the interest of not linking to a 404'ing artifact,
 # don't include a link
 job["logs"] = []
-job = await addArtifactUploadedLinks(
+job = await add_artifact_uploaded_links(
 message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
 )
 return job
-async def fetchArtifacts(root_url, task_id, run_id, session):
+async def fetch_artifacts(root_url, task_id, run_id, session):
 async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
 res = await async_queue.listArtifacts(task_id, run_id)
 artifacts = res["artifacts"]
@@ -378,10 +378,10 @@ async def fetchArtifacts(root_url, task_id, run_id, session):
 # fetch them in order to determine if there is an error_summary log;
 # TODO refactor this when there is a way to only retrieve the error_summary
 # artifact: https://bugzilla.mozilla.org/show_bug.cgi?id=1629716
-async def addArtifactUploadedLinks(root_url, task_id, run_id, job, session):
+async def add_artifact_uploaded_links(root_url, task_id, run_id, job, session):
 artifacts = []
 try:
-artifacts = await fetchArtifacts(root_url, task_id, run_id, session)
+artifacts = await fetch_artifacts(root_url, task_id, run_id, session)
 except Exception:
 logger.debug("Artifacts could not be found for task: %s run: %s", task_id, run_id)
 return job

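state_from_run and result_from_run, near the top of this file, are the pivot between Taskcluster run states and the Treeherder state/result pair that build_message emits; the run_to_result table is truncated above. A runnable sketch of only the visible parts, with the fallback value an assumption:

def state_from_run(job_run):
    # Treeherder folds "exception" and "failed" runs into the "completed" state.
    return "completed" if job_run["state"] in ("exception", "failed") else job_run["state"]

def result_from_run(job_run):
    run_to_result = {
        "completed": "success",
        "failed": "fail",
        # remaining entries truncated in this diff
    }
    return run_to_result.get(job_run["state"], "unknown")  # fallback assumed

print(state_from_run({"state": "failed"}), result_from_run({"state": "failed"}))
# completed fail
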
View file

@@ -11,7 +11,7 @@
 # Note: pushes on a branch on Github would not have a PR ID
 # Function extracted from
 # https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
-def parseRoute(route):
+def parse_route(route):
 id = None
 owner = None
 parsed_project = None

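parse_route's body is cut off above; judging by the linked route_parser.js, routes look roughly like "tc-treeherder.v2.<project>.<revision>.<push_id>", with "<owner>/<project>" and an optional PR ID for GitHub pushes. A hedged sketch (shapes and field names are assumptions):

def parse_route_sketch(route):
    # "tc-treeherder.v2.<project>.<revision>.<push_id>"           (hg)
    # "tc-treeherder.v2.<owner>/<project>.<revision>[.<pr_id>]"   (GitHub)
    _, _, project_part, revision, *rest = route.split(".")
    owner, parsed_project = (
        project_part.split("/") if "/" in project_part else (None, project_part)
    )
    id = rest[0] if rest else None  # branch pushes on GitHub carry no PR ID
    return {"id": id, "owner": owner, "project": parsed_project, "revision": revision}

print(parse_route_sketch("tc-treeherder.v2.autoland.0123abcd.12345"))
# {'id': '12345', 'owner': None, 'project': 'autoland', 'revision': '0123abcd'}
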
View file

@@ -8,7 +8,7 @@ import newrelic.agent
 from treeherder.etl.classification_loader import ClassificationLoader
 from treeherder.etl.job_loader import JobLoader
 from treeherder.etl.push_loader import PushLoader
-from treeherder.etl.taskcluster_pulse.handler import handleMessage
+from treeherder.etl.taskcluster_pulse.handler import handle_message
 from treeherder.workers.task import retryable_task
 # NOTE: default values for root_url parameters can be removed once all tasks that lack
@@ -25,9 +25,9 @@ def store_pulse_tasks(
 loop = asyncio.get_event_loop()
 newrelic.agent.add_custom_attribute("exchange", exchange)
 newrelic.agent.add_custom_attribute("routing_key", routing_key)
-# handleMessage expects messages in this format
+# handle_message expects messages in this format
 runs = loop.run_until_complete(
-handleMessage(
+handle_message(
 {
 "exchange": exchange,
 "payload": pulse_job,

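store_pulse_tasks is a synchronous Celery task, so it reaches the async handler through the event loop, the same bridge ingest.py uses above. A minimal sketch with a stub standing in for handle_message:

import asyncio

async def handle_message_stub(message, task_definition=None):
    return [{"taskId": message["payload"]["status"]["taskId"]}]

message = {
    "exchange": "exchange/taskcluster-queue/v1/task-pending",  # assumed name
    "payload": {"status": {"taskId": "abc123"}},
}

loop = asyncio.new_event_loop()  # the task itself reuses get_event_loop()
print(loop.run_until_complete(handle_message_stub(message)))
loop.close()
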
View file

@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
 class RemovalStrategy(ABC):
 @property
 @abstractmethod
-def CYCLE_INTERVAL(self) -> int:
+def cycle_interval(self) -> int:
 """
 expressed in days
 """
@@ -26,7 +26,7 @@ class RemovalStrategy(ABC):
 @has_valid_explicit_days
 def __init__(self, chunk_size: int, days: int = None):
-days = days or self.CYCLE_INTERVAL
+days = days or self.cycle_interval
 self._cycle_interval = timedelta(days=days)
 self._chunk_size = chunk_size
@@ -65,7 +65,7 @@ class MainRemovalStrategy(RemovalStrategy):
 """
 @property
-def CYCLE_INTERVAL(self) -> int:
+def cycle_interval(self) -> int:
 # WARNING!! Don't override this without proper approval!
 return 365 # days #
 ########################################################
@@ -127,7 +127,7 @@ class TryDataRemoval(RemovalStrategy):
 SIGNATURE_BULK_SIZE = 10
 @property
-def CYCLE_INTERVAL(self) -> int:
+def cycle_interval(self) -> int:
 # WARNING!! Don't override this without proper approval!
 return 42 # days #
 ########################################################
@@ -246,7 +246,7 @@ class IrrelevantDataRemoval(RemovalStrategy):
 ]
 @property
-def CYCLE_INTERVAL(self) -> int:
+def cycle_interval(self) -> int:
 # WARNING!! Don't override this without proper approval!
 return 180 # days #
 ########################################################
@@ -340,7 +340,7 @@ class StalledDataRemoval(RemovalStrategy):
 """
 @property
-def CYCLE_INTERVAL(self) -> int:
+def cycle_interval(self) -> int:
 # WARNING!! Don't override this without proper approval!
 return 120 # days #
 ########################################################

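N802 treats property getters as ordinary functions, so the constant-style CYCLE_INTERVAL getters become cycle_interval and only the attribute access in __init__ changes. A condensed, runnable version of the renamed pattern from this file (the has_valid_explicit_days decorator is dropped here):

from abc import ABC, abstractmethod
from datetime import timedelta

class RemovalStrategy(ABC):
    @property
    @abstractmethod
    def cycle_interval(self) -> int:
        """expressed in days"""

    def __init__(self, chunk_size: int, days: int = None):
        days = days or self.cycle_interval
        self._cycle_interval = timedelta(days=days)
        self._chunk_size = chunk_size

class MainRemovalStrategy(RemovalStrategy):
    @property
    def cycle_interval(self) -> int:
        return 365  # days

print(MainRemovalStrategy(chunk_size=100)._cycle_interval)  # 365 days, 0:00:00
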
View file

@@ -7,7 +7,7 @@ import jsone
 import taskcluster
 from django.conf import settings
-from treeherder.utils.taskcluster_lib_scopes import satisfiesExpression
+from treeherder.utils.taskcluster_lib_scopes import satisfies_expression
 logger = logging.getLogger(__name__)
@@ -117,7 +117,7 @@ class TaskclusterModelImpl(TaskclusterModel):
 expansion = self.auth.expandScopes({"scopes": decision_task["scopes"]})
 expression = f"in-tree:hook-action:{hook_group_id}/{hook_id}"
-if not satisfiesExpression(expansion["scopes"], expression):
+if not satisfies_expression(expansion["scopes"], expression):
 raise RuntimeError(
 f"Action is misconfigured: decision task's scopes do not satisfy {expression}"
 )

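This guard is why satisfies_expression is imported here: an action hook may only fire when the decision task's expanded scopes satisfy the in-tree:hook-action expression. A usage sketch with made-up scope values:

from treeherder.utils.taskcluster_lib_scopes import satisfies_expression

expanded_scopes = ["in-tree:hook-action:project-gecko/in-tree-action-1-*"]  # assumed
expression = "in-tree:hook-action:project-gecko/in-tree-action-1-generic/abc123"

if not satisfies_expression(expanded_scopes, expression):
    raise RuntimeError(
        f"Action is misconfigured: decision task's scopes do not satisfy {expression}"
    )
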
View file

@@ -4,25 +4,25 @@ TODO: Extract this module into a dedicated PyPI package, acting as the
 """
-def satisfiesExpression(scopeset, expression):
+def satisfies_expression(scopeset, expression):
 if not isinstance(scopeset, list):
 raise TypeError("Scopeset must be an array.")
-def isSatisfied(expr):
+def is_satisfied(expr):
 if isinstance(expr, str):
-return any([patternMatch(s, expr) for s in scopeset])
+return any([pattern_match(s, expr) for s in scopeset])
 return (
 "AllOf" in expr
-and all([isSatisfied(e) for e in expr["AllOf"]])
+and all([is_satisfied(e) for e in expr["AllOf"]])
 or "AnyOf" in expr
-and any([isSatisfied(e) for e in expr["AnyOf"]])
+and any([is_satisfied(e) for e in expr["AnyOf"]])
 )
-return isSatisfied(expression)
+return is_satisfied(expression)
-def patternMatch(pattern: str, scope):
+def pattern_match(pattern: str, scope):
 if scope == pattern:
 return True