Refactored logging statements to use lazy % formatting (#3335)

Joshua Hassan 2023-03-09 11:58:37 +01:00 committed by GitHub
Parent 21a77f4e8d
Commit a8bacbe04d
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
22 changed files with 113 additions and 87 deletions
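
For context, the motivation for lazy %-style arguments is that the logging module defers string interpolation until a record is actually handled, and a constant message template is easier for log aggregation tools to group; the f-string variants build the full message on every call regardless of the log level. A minimal sketch of the difference, using hypothetical values rather than code from this repository:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

url = "https://example.com/data.zst"  # hypothetical values for illustration
path = "/tmp/data.zst"

# Eager: the f-string is evaluated before logging.info() runs,
# even when the INFO level is disabled for this logger.
logger.info(f"Downloading {url} to {path}")

# Lazy: the template and arguments are stored on the LogRecord and only
# combined (via the % operator) if the record is actually emitted.
logger.info("Downloading %s to %s", url, path)
```

The %d and %f specifiers used throughout the diff are the usual %-formatting conversions for integers and floats; this is also the style pylint's logging-fstring-interpolation check recommends.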

View file

@ -50,7 +50,7 @@ def is_different_schema(path):
r = requests.get(url)
if not r.ok:
logger.info(f"Version file is not yet available to download for {path}")
logger.info("Version file is not yet available to download for %s", path)
return True
prev_version = int(r.text)
@ -67,7 +67,7 @@ def download_support_file(path, file_name, extract=True):
url = urljoin(DATABASES[path]["url"], file_name)
path = os.path.join(os.path.dirname(path), file_name)
logger.info(f"Downloading {url} to {path}")
logger.info("Downloading %s to %s", url, path)
updated = utils.download_check_etag(url, path)
if extract and updated and path.endswith(".zst"):
@ -77,7 +77,7 @@ def download_support_file(path, file_name, extract=True):
return True
except requests.exceptions.HTTPError:
logger.info(
f"{file_name} is not yet available to download for {path}", exc_info=True
"%s is not yet available to download for %s", file_name, path, exc_info=True
)
return False
@ -92,7 +92,7 @@ def download(path, support_files_too=False, extract=True):
url = DATABASES[path]["url"]
try:
logger.info(f"Downloading {url} to {zst_path}")
logger.info("Downloading %s to %s", url, zst_path)
updated = utils.download_check_etag(url, zst_path)
if extract and updated:
@ -106,7 +106,7 @@ def download(path, support_files_too=False, extract=True):
return successful
except requests.exceptions.HTTPError:
logger.info(f"{url} is not yet available to download", exc_info=True)
logger.info("%s is not yet available to download", url, exc_info=True)
return False
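
One detail worth noting in the hunks above: exc_info=True stays a keyword argument while the values previously interpolated by the f-string become positional arguments to logger.info(). A small sketch of the same call shape, using hypothetical names and values:

```python
import logging

logger = logging.getLogger(__name__)
file_name, path = "bugs.json.zst", "data/bugs.json"  # hypothetical values

try:
    raise FileNotFoundError(file_name)
except FileNotFoundError:
    # Positional args fill the %s placeholders; exc_info=True attaches the
    # active exception's traceback to the log record.
    logger.info(
        "%s is not yet available to download for %s", file_name, path, exc_info=True
    )
```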

View file

@ -58,7 +58,7 @@ class Github:
def fetch_events(self, events_url: str) -> list:
self.api_limit()
logger.info(f"Fetching {events_url}")
logger.info("Fetching %s", events_url)
headers = {"Authorization": "token {}".format(self.get_token())}
response = get_session("github").get(events_url, headers=headers)
response.raise_for_status()
@ -78,7 +78,7 @@ class Github:
if isinstance(data, dict):
data = [data]
logger.info(f"Fetching {url}")
logger.info("Fetching %s", url)
if retrieve_events:
for item in data:

View file

@ -92,11 +92,11 @@ class FixTimeModel(BugModel):
quantiles = _quantiles(2)
logger.info(
f"Max fix time: {max(fix_time for bug_id, fix_time in bug_fix_times)}"
"Max fix time: %s", max(fix_time for bug_id, fix_time in bug_fix_times)
)
logger.info(f"Fix time quantiles: {quantiles}")
logger.info(f"Fix time quartiles: {_quantiles(4)}")
logger.info(f"Fix time deciles: {_quantiles(10)}")
logger.info("Fix time quantiles: %s", quantiles)
logger.info("Fix time quartiles: %s", _quantiles(4))
logger.info("Fix time deciles: %s", _quantiles(10))
classes = {}
for bug_id, fix_time in bug_fix_times:
@ -110,7 +110,9 @@ class FixTimeModel(BugModel):
for i in range(len(quantiles) + 1):
logger.info(
f"{sum(1 for label in classes.values() if label == i)} bugs are in the {i}th quantile"
"%d bugs are in the %dth quantile",
sum(1 for label in classes.values() if label == i),
i,
)
return classes, list(range(len(quantiles) + 1))

View file

@ -963,7 +963,7 @@ def _hg_log(revs: list[bytes], branch: str = "tip") -> tuple[Commit, ...]:
def get_revs(hg, rev_start=0, rev_end="tip"):
logger.info(f"Getting revs from {rev_start} to {rev_end}...")
logger.info("Getting revs from %s to %s...", rev_start, rev_end)
args = hglib.util.cmdbuilder(
b"log",
@ -1021,7 +1021,7 @@ class Experiences:
def calculate_experiences(
commits: Collection[Commit], first_pushdate: datetime, save: bool = True
) -> None:
logger.info(f"Analyzing seniorities from {len(commits)} commits...")
logger.info("Analyzing seniorities from %d commits...", len(commits))
experiences = Experiences(save)
@ -1034,7 +1034,7 @@ def calculate_experiences(
time_lapse = commit.pushdate - experiences[key]
commit.seniority_author = time_lapse.total_seconds()
logger.info(f"Analyzing experiences from {len(commits)} commits...")
logger.info("Analyzing experiences from %d commits...", len(commits))
# Note: In the case of files, directories, components, we can't just use the sum of previous commits, as we could end
# up overcounting them. For example, consider a commit A which modifies "dir1" and "dir2", a commit B which modifies
@ -1365,10 +1365,10 @@ def download_commits(
first_pushdate = get_first_pushdate(repo_dir)
logger.info(f"Mining {len(revs)} commits...")
logger.info("Mining %d commits...", len(revs))
if not use_single_process:
logger.info(f"Using {os.cpu_count()} processes...")
logger.info("Using %d processes...", os.cpu_count())
commits = hg_log_multi(repo_dir, revs, branch)
else:
commits = hg_log(hg, revs, branch)
@ -1377,7 +1377,7 @@ def download_commits(
commits_num = len(commits)
logger.info(f"Mining {commits_num} patches...")
logger.info("Mining %d patches...", commits_num)
global code_analysis_server
@ -1485,9 +1485,9 @@ def clone(
# Pull, to make sure the pushlog is generated.
with hglib.open(repo_dir) as hg:
logger.info(f"Pulling {repo_dir}")
logger.info("Pulling %s", repo_dir)
hg.pull(update=update)
logger.info(f"{repo_dir} pulled")
logger.info("%s pulled", repo_dir)
return
except hglib.error.ServerError as e:
@ -1506,7 +1506,7 @@ def clone(
)
subprocess.run(cmd, check=True)
logger.info(f"{repo_dir} cloned")
logger.info("%s cloned", repo_dir)
def pull(repo_dir: str, branch: str, revision: str) -> None:

View file

@ -66,7 +66,7 @@ def download_similarity_model(model_name):
path = f"{model_name_to_class[model_name].__name__.lower()}.similaritymodel"
url = f"https://community-tc.services.mozilla.com/api/index/v1/task/project.bugbug.train_similarity.latest/artifacts/public/{path}.zst"
logger.info(f"Downloading similarity model from {url}...")
logger.info("Downloading similarity model from %s...", url)
updated = download_check_etag(url)
if updated:
zstd_decompress(path)

View file

@ -207,7 +207,7 @@ def get_push_data(
push_data_queue.append(elem)
logger.info(f"push data nodes: {push_data_count}")
logger.info("Push data nodes: %d", push_data_count)
push_data = [
(
@ -235,7 +235,7 @@ def get_push_data(
tuple(all_groups_set), cast(Set[Runnable], all_groups_set), "group"
)
all_groups_set = set(all_groups)
logger.info(f"{len(all_groups_set)} manifests run in the last 28 pushes")
logger.info("%d manifests run in the last 28 pushes", len(all_groups_set))
all_runnables_set = set(
sum((list(push_runnables) for _, _, push_runnables, _, _ in push_data), [])
@ -245,7 +245,7 @@ def get_push_data(
tuple(all_runnables_set), all_runnables_set, granularity
)
all_runnables_set = set(all_runnables)
logger.info(f"{len(all_runnables_set)} runnables run in the last 28 pushes")
logger.info("%d runnables run in the last 28 pushes", len(all_runnables_set))
def push_data_iter() -> Iterator[PushResult]:
return (
@ -447,7 +447,7 @@ def generate_failing_together_probabilities(
stats[couple] = (support, confidence)
logger.info(f"{skipped} couples skipped because their support was too low")
logger.info("%d couples skipped because their support was too low", skipped)
logger.info("Redundancies with the highest support and confidence:")
for couple, (support, confidence) in sorted(
@ -456,7 +456,13 @@ def generate_failing_together_probabilities(
failure_count = count_both_failures[couple]
run_count = count_runs[couple]
logger.info(
f"{couple[0]} - {couple[1]} redundancy confidence {confidence}, support {support} ({failure_count} over {run_count})."
"%s - %s redundancy confidence %f, support %d (%d over %d).",
couple[0],
couple[1],
confidence,
support,
failure_count,
run_count,
)
logger.info("Redundancies with the highest confidence and lowest support:")
@ -466,7 +472,13 @@ def generate_failing_together_probabilities(
failure_count = count_both_failures[couple]
run_count = count_runs[couple]
logger.info(
f"{couple[0]} - {couple[1]} redundancy confidence {confidence}, support {support} ({failure_count} over {run_count})."
"%s - %s redundancy confidence %f, support %d (%d over %d).",
couple[0],
couple[1],
confidence,
support,
failure_count,
run_count,
)
failing_together: dict = {}
@ -515,7 +527,7 @@ def generate_failing_together_probabilities(
failing_together[couple[0]][couple[1]] = (support, confidence)
for percentage, count in count_redundancies.most_common():
logger.info(f"{count} with {percentage} confidence")
logger.info("%d with %f%% confidence", count, percentage)
failing_together_db = get_failing_together_db(granularity, False)
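
In the hunk above, the %% is the %-formatting escape for a literal percent sign, so the rendered message ends in "% confidence". A tiny illustration with hypothetical values:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

count, percentage = 12, 99.5  # hypothetical values

# The message renders as: "12 with 99.500000% confidence"
logger.info("%d with %f%% confidence", count, percentage)
```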

View file

@ -224,7 +224,7 @@ def download_model(model_name: str) -> str:
path = f"{model_name}model"
url = f"https://community-tc.services.mozilla.com/api/index/v1/task/project.bugbug.train_{model_name}.{version}/artifacts/public/{path}.zst"
logger.info(f"Downloading {url}...")
logger.info("Downloading %s...", url)
updated = download_check_etag(url)
if updated:
zstd_decompress(path)

View file

@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
def boot_worker() -> None:
# Clone autoland
def clone_autoland() -> None:
logger.info(f"Cloning autoland in {REPO_DIR}...")
logger.info("Cloning autoland in %s...", REPO_DIR)
repository.clone(REPO_DIR, "https://hg.mozilla.org/integration/autoland")
def extract_past_failures_label() -> None:
@ -118,7 +118,7 @@ def boot_worker() -> None:
for push_id, push_obj in r.json()["pushes"].items()
]
logger.info(f"Retrieving known tasks from {revs}")
logger.info("Retrieving known tasks from %s...", revs)
# Store in a file the list of tasks in the latest autoland pushes.
# We use more than one to protect ourselves from broken decision tasks.
@ -130,7 +130,7 @@ def boot_worker() -> None:
if r.ok:
known_tasks.update(r.json())
logger.info(f"Retrieved {len(known_tasks)} tasks")
logger.info("Retrieved %d tasks", len(known_tasks))
assert len(known_tasks) > 0
@ -198,7 +198,7 @@ def boot_worker() -> None:
logger.info("Touched together DB updated.")
except Exception as e:
# It's not ideal, but better not to crash the service!
logger.error(f"Exception while updating commits DB: {e}")
logger.error("Exception while updating commits DB: %s", e)
# Wait list of schedulable tasks to be downloaded and written to disk.
retrieve_schedulable_tasks_future.result()

View file

@ -181,7 +181,7 @@ def schedule_tests(branch: str, rev: str) -> str:
from bugbug_http.app import JobInfo
job = JobInfo(schedule_tests, branch, rev)
LOGGER.info(f"Processing {job}...")
LOGGER.info("Processing %s...", job)
# Pull the revision to the local repository
LOGGER.info("Pulling commits from the remote repository...")
@ -256,7 +256,7 @@ def get_config_specific_groups(config: str) -> str:
from bugbug_http.app import JobInfo
job = JobInfo(get_config_specific_groups, config)
LOGGER.info(f"Processing {job}...")
LOGGER.info("Processing %s...", job)
equivalence_sets = testselect._get_equivalence_sets(0.9)

View file

@ -29,7 +29,7 @@ def classify_bugs(model_name: str, classifier: str, bug_id: int) -> None:
model_file_name = f"{model_name}model"
if not os.path.exists(model_file_name):
logger.info(f"{model_file_name} does not exist. Downloading the model....")
logger.info("%s does not exist. Downloading the model....", model_file_name)
try:
download_model(model_name)
except requests.HTTPError:

View file

@ -42,7 +42,7 @@ class Retriever(object):
else:
changed_ids = set()
logger.info(f"Retrieved {len(changed_ids)} IDs.")
logger.info("Retrieved %d IDs.", len(changed_ids))
all_components = bugzilla.get_product_component_count(9999)
@ -60,17 +60,17 @@ class Retriever(object):
two_years_and_six_months_ago = datetime.utcnow() - relativedelta(
years=2, months=6
)
logger.info(f"Retrieving bug IDs since {two_years_and_six_months_ago}")
logger.info("Retrieving bug IDs since %s", two_years_and_six_months_ago)
timespan_ids = bugzilla.get_ids_between(two_years_and_six_months_ago)
if limit:
timespan_ids = timespan_ids[-limit:]
logger.info(f"Retrieved {len(timespan_ids)} IDs.")
logger.info("Retrieved %d IDs.", len(timespan_ids))
# Get IDs of labelled bugs.
labelled_bug_ids = labels.get_all_bug_ids()
if limit:
labelled_bug_ids = labelled_bug_ids[-limit:]
logger.info(f"{len(labelled_bug_ids)} labelled bugs to download.")
logger.info("%d labelled bugs to download.", len(labelled_bug_ids))
# Get the commits DB, as we need it to get the bug IDs linked to recent commits.
# XXX: Temporarily avoid downloading the commits DB when a limit is set, to avoid the integration test fail when the commits DB is bumped.
@ -89,7 +89,7 @@ class Retriever(object):
)
if limit:
commit_bug_ids = commit_bug_ids[-limit:]
logger.info(f"{len(commit_bug_ids)} bugs linked to commits to download.")
logger.info("%d bugs linked to commits to download.", len(commit_bug_ids))
# Get IDs of bugs which are regressions, bugs which caused regressions (useful for the regressor model),
# and blocked bugs.
@ -119,7 +119,7 @@ class Retriever(object):
]
if limit:
test_failure_bug_ids = test_failure_bug_ids[-limit:]
logger.info(f"{len(test_failure_bug_ids)} bugs about test failures.")
logger.info("%d bugs about test failures.", len(test_failure_bug_ids))
all_ids = (
timespan_ids

View file

@ -207,7 +207,7 @@ class CommitClassifier(object):
assert self.testfailure_model is not None
def clone_git_repo(self, repo_url, repo_dir, rev="origin/branches/default/tip"):
logger.info(f"Cloning {repo_url}...")
logger.info("Cloning %s...", repo_url)
if not os.path.exists(repo_dir):
tenacity.retry(
@ -306,7 +306,7 @@ class CommitClassifier(object):
if hg_base:
hg.update(rev=hg_base, clean=True)
logger.info(f"Updated repo to {hg_base}")
logger.info("Updated repo to %s", hg_base)
if self.git_repo_dir and hg_base != "tip":
try:
@ -318,16 +318,18 @@ class CommitClassifier(object):
check=True,
cwd=self.git_repo_dir,
)
logger.info(f"Updated git repo to {self.git_base}")
logger.info("Updated git repo to %s", self.git_base)
except Exception as e:
logger.info(f"Updating git repo to Mercurial {hg_base} failed: {e}")
logger.info(
"Updating git repo to Mercurial %s failed: %s", hg_base, e
)
def load_user(phid):
if phid.startswith("PHID-USER"):
return phabricator_api.load_user(user_phid=phid)
elif phid.startswith("PHID-PROJ"):
# TODO: Support group reviewers somehow.
logger.info(f"Skipping group reviewer {phid}")
logger.info("Skipping group reviewer %s", phid)
else:
raise Exception(f"Unsupported reviewer {phid}")
@ -453,25 +455,30 @@ class CommitClassifier(object):
logger.info("Feature: {}".format(name))
logger.info("Shap value: {}{}".format("+" if (is_positive) else "-", val))
logger.info(f"spearman: {spearman}")
logger.info(f"value: {value}")
logger.info(f"overall mean: {np.mean(X)}")
logger.info(f"overall median: {np.median(X)}")
logger.info(f"mean for y == 0: {np.mean(clean_X)}")
logger.info(f"mean for y == 1: {np.mean(buggy_X)}")
logger.info(f"median for y == 0: {np.median(clean_X)}")
logger.info(f"median for y == 1: {np.median(buggy_X)}")
logger.info("spearman: %f", spearman)
logger.info("value: %f", value)
logger.info("overall mean: %f", np.mean(X))
logger.info("overall median: %f", np.median(X))
logger.info("mean for y == 0: %f", np.mean(clean_X))
logger.info("mean for y == 1: %f", np.mean(buggy_X))
logger.info("median for y == 0: %f", np.median(clean_X))
logger.info("median for y == 1: %f", np.median(buggy_X))
logger.info(
f"perc_buggy_values_higher_than_median: {perc_buggy_values_higher_than_median}"
"perc_buggy_values_higher_than_median: %f",
perc_buggy_values_higher_than_median,
)
logger.info(
f"perc_buggy_values_lower_than_median: {perc_buggy_values_lower_than_median}"
"perc_buggy_values_lower_than_median: %f",
perc_buggy_values_lower_than_median,
)
logger.info(
f"perc_clean_values_higher_than_median: {perc_clean_values_higher_than_median}"
"perc_clean_values_higher_than_median: %f",
perc_clean_values_higher_than_median,
)
logger.info(
f"perc_clean_values_lower_than_median: {perc_clean_values_lower_than_median}"
"perc_clean_values_lower_than_median: %f",
perc_clean_values_lower_than_median,
)
features.append(
@ -651,7 +658,7 @@ class CommitClassifier(object):
commits[-1], probabilities=True
)
logger.info(f"Test failure risk: {testfailure_probs[0][1]}")
logger.info("Test failure risk: %f", testfailure_probs[0][1])
if not runnable_jobs_path:
runnable_jobs = {}

View file

@ -363,7 +363,7 @@ class LandingsRiskReportGenerator(object):
last_commit_by_bug[commit["bug_id"]] = push_date
logger.info(f"Retrieving bug IDs since {days} days ago")
logger.info("Retrieving bug IDs since %d days ago", days)
timespan_ids = bugzilla.get_ids_between(since, resolution=["---", "FIXED"])
return list(set(commit["bug_id"] for commit in commits) | set(timespan_ids))
@ -845,7 +845,7 @@ class LandingsRiskReportGenerator(object):
meta_bugs = self.get_meta_bugs(days)
last_modified = db.last_modified(bugzilla.BUGS_DB)
logger.info(f"Deleting bugs modified since the last run on {last_modified}")
logger.info("Deleting bugs modified since the last run on %s", last_modified)
changed_ids = bugzilla.get_ids(
{"f1": "delta_ts", "o1": "greaterthaneq", "v1": last_modified.date()}
)
@ -893,7 +893,7 @@ class LandingsRiskReportGenerator(object):
logger.info("Download bugs of interest...")
bugzilla.download_bugs(all_ids)
logger.info(f"{len(bugs)} bugs to analyze.")
logger.info("%d bugs to analyze.", len(bugs))
bugs_set = set(bugs + test_info_bugs + meta_bugs)
@ -1839,7 +1839,7 @@ Report bugs or enhancement requests on [https://github.com/mozilla/bugbug](https
receivers = team_to_receivers[team]
logger.info(f"Sending email to {team}")
logger.info("Sending email to %s", team)
from_email = sendgrid.helpers.mail.From(get_secret("NOTIFICATION_SENDER"))
to_emails = [sendgrid.helpers.mail.To(receivers[0])] + [
sendgrid.helpers.mail.Cc(receiver) for receiver in receivers[1:]
@ -1856,9 +1856,9 @@ Report bugs or enhancement requests on [https://github.com/mozilla/bugbug](https
from_email, to_emails, subject, plain_text_content, html_content
)
response = send_grid_client.send(message=message)
logger.info(f"Status code: {response.status_code}")
logger.info(f"Headers: {response.headers}")
logger.info(f"Body: {response.body}")
logger.info("Status code: %s", response.status_code)
logger.info("Headers: %s", response.headers)
logger.info("Body: %s", response.body)
except Exception:
traceback.print_exc()
failure = True

View file

@ -22,7 +22,7 @@ def classify_issues(
model_file_name = f"{model_name}model"
if not os.path.exists(model_file_name):
logger.info(f"{model_file_name} does not exist. Downloading the model....")
logger.info("%s does not exist. Downloading the model....", model_file_name)
try:
download_model(model_name)
except requests.HTTPError:

View file

@ -41,7 +41,7 @@ class PastBugsCollector(object):
if bug_fixing_commit["type"] in ("d", "r")
)
logger.info(f"{len(bug_fixing_commits_nodes)} bug-fixing commits to analyze")
logger.info("%d bug-fixing commits to analyze", len(bug_fixing_commits_nodes))
all_bug_ids = set(commit["bug_id"] for commit in repository.get_commits())

View file

@ -535,7 +535,7 @@ def evaluate(bug_introducing_commits):
for bug in tqdm(bugzilla.get_bugs()):
if bug["regressed_by"]:
known_regressors[bug["id"]] = bug["regressed_by"]
logger.info(f"Loaded {len(known_regressors)} known regressors")
logger.info("Loaded %d known regressors", len(known_regressors))
fix_to_regressors_map = defaultdict(list)
for bug_introducing_commit in bug_introducing_commits:
@ -546,9 +546,10 @@ def evaluate(bug_introducing_commits):
bug_introducing_commit["bug_introducing_rev"]
)
logger.info(f"{len(fix_to_regressors_map)} fixes linked to regressors")
logger.info("%d fixes linked to regressors", len(fix_to_regressors_map))
logger.info(
f"{sum(len(regressors) for regressors in fix_to_regressors_map.values())} regressors linked to fixes"
"%d regressors linked to fixes",
sum(len(regressors) for regressors in fix_to_regressors_map.values()),
)
logger.info("Measuring how many known regressors SZZ was able to find correctly...")
@ -609,7 +610,7 @@ def evaluate(bug_introducing_commits):
logger.info(
f"Perfectly found {perfect_regressors} regressors out of {all_regressors}"
)
logger.info(f"Found {found_regressors} regressors out of {all_regressors}")
logger.info("Found %d regressors out of %d", found_regressors, all_regressors)
logger.info(
f"Misassigned {misassigned_regressors} regressors out of {all_regressors}"
)

View file

@ -26,7 +26,7 @@ logging.basicConfig(level=logging.INFO)
def get_task_metrics_from_uri(index_uri):
index_url = BASE_URL.format(index_uri)
LOGGER.info(f"Retrieving metrics from {index_url}")
LOGGER.info("Retrieving metrics from %s", index_url)
r = requests.get(index_url)
if r.status_code == 404:

View file

@ -74,7 +74,7 @@ def go(months: int) -> None:
)
pushes = [push for push in pushes if push.rev not in known_scheduler_stats]
logger.info(f"{len(pushes)} left to analyze")
logger.info("%d left to analyze", len(pushes))
def compress_and_upload() -> None:
utils.zstd_compress(SHADOW_SCHEDULER_STATS_DB)

View file

@ -37,7 +37,7 @@ def main(args):
model_file_name = f"{similarity.model_name_to_class[args.algorithm].__name__.lower()}.similaritymodel"
if not os.path.exists(model_file_name):
logger.info(f"{model_file_name} does not exist. Downloading the model....")
logger.info("%s does not exist. Downloading the model....", model_file_name)
try:
download_check_etag(URL.format(model_file_name))
except requests.HTTPError:

View file

@ -88,7 +88,9 @@ class Retriever(object):
else:
num_errors += 1
else:
logger.info(f"Analyzing {push.rev} at the {granularity} level...")
logger.info(
"Analyzing %s at the %s level...", push.rev, granularity
)
key = cache_key(push)
@ -129,8 +131,10 @@ class Retriever(object):
progress_bar.update(1)
logger.info(f"{num_cached} pushes were already cached out of {num_pushes}")
logger.info(f"There were errors in {num_errors} pushes")
logger.info(
"%d pushes were already cached out of %d", num_cached, num_pushes
)
logger.info("There were errors in %d pushes", num_errors)
def retrieve_from_cache(push):
return mozci.config.cache.get(cache_key(push))
@ -325,10 +329,10 @@ class Retriever(object):
except StopIteration:
pass
logger.info(f"saved push data nodes: {len(saved_nodes)}")
logger.info(f"skipped {skipped_no_commits} (no commits in our DB)")
logger.info(f"skipped {skipped_too_big_commits} (too big commits)")
logger.info(f"skipped {skipped_no_runnables} (no interesting runnables)")
logger.info("saved push data nodes: %d", len(saved_nodes))
logger.info("skipped %d (no commits in our DB)", skipped_no_commits)
logger.info("skipped %d (too big commits)", skipped_too_big_commits)
logger.info("skipped %d (no interesting runnables)", skipped_no_runnables)
past_failures["push_num"] = push_num
past_failures.close()

View file

@ -90,7 +90,7 @@ class TestingPolicyStatsGenerator(object):
for commit in commits
if repository.get_revision_id(commit) in revision_map
]
logger.info(f"{len(commits)} revisions")
logger.info("%d revisions", len(commits))
# Filter-out commits with no testing tags.
commits = [
@ -101,7 +101,7 @@ class TestingPolicyStatsGenerator(object):
)
is not None
]
logger.info(f"{len(commits)} revisions with testing tags")
logger.info("%d revisions with testing tags", len(commits))
def list_testing_projects(
commits: Iterable[repository.CommitDict],

View file

@ -47,7 +47,7 @@ class Trainer(object):
else:
logger.info("Skipping download of the databases")
logger.info(f"Training *{model_name}* model")
logger.info("Training *%s* model", model_name)
metrics = model_obj.train(limit=args.limit)
# Save the metrics as a file that can be uploaded as an artifact.