# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import collections
import concurrent.futures
import itertools
import math
import os
import struct
import traceback
from datetime import datetime
from logging import INFO, basicConfig, getLogger
from typing import Dict, Generator, List, NewType, Tuple

import adr
import dateutil.parser
import mozci.push
from dateutil.relativedelta import relativedelta
from tqdm import tqdm

from bugbug import commit_features, db, repository, test_scheduling
from bugbug.utils import create_tar_zst, zstd_compress

Revision = NewType("Revision", str)
TaskName = NewType("TaskName", str)
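# A PushResult, as it is built below, holds the push's revisions, the runnables
# (task labels or groups) that ran on the push, the possible regressions and the
# likely regressions.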
PushResult = Tuple[List[Revision], List[TaskName], List[TaskName], List[TaskName]]

basicConfig(level=INFO)
logger = getLogger(__name__)

JOBS_TO_CONSIDER = ("test-", "build-")
JOBS_TO_IGNORE = (
    "build-docker-image-",
    "-android-hw-",
    "-awsy-",
    "-raptor-",
    "-talos-",
    "backlog",
    # inclusive test suites -- these *only* run when certain files have changed
    "-test-verify-",
    "-test-coverage-",
    "jittest",
    "jsreftest",
    "android-hw-gfx",
)

# The mozci version (to bump whenever we change the mozci regression algorithm),
# so we can keep track of which version of mozci was used to analyze a given push
# and we can decide when we want to regenerate parts of the dataset.
MOZCI_VERSION = 2

TRAINING_MONTHS = {
    "label": 7,
    "group": 7,
}


def filter_runnables(runnables, all_runnables, granularity):
    return tuple(
        runnable
        for runnable in runnables
        if runnable in all_runnables
        and (
            granularity == "group"
            or (
                any(runnable.startswith(j) for j in JOBS_TO_CONSIDER)
                and not any(j in runnable for j in JOBS_TO_IGNORE)
            )
        )
    )


# Handle "meaningless" labeling changes ("meaningless" as they shouldn't really affect test scheduling).
def rename_tasks(tasks):
    return [task.replace("test-linux64-", "test-linux1804-64-") for task in tasks]


class Retriever(object):
    def generate_push_data(self, granularity: str) -> None:
        # We keep in the cache the fact that we failed to analyze a push for 10
        # days, so if we re-run often we don't retry the same pushes many times.
        MISSING_CACHE_RETENTION = 10 * 24 * 60
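        # Note: the retention value is assumed to be expressed in minutes, so
        # 10 * 24 * 60 corresponds to the "10 days" mentioned above.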

        # We'll use the past TRAINING_MONTHS months only for training the model,
        # but we use half TRAINING_MONTHS months more than that to calculate the
        # failure statistics.
        from_months = TRAINING_MONTHS[granularity] + math.floor(
            TRAINING_MONTHS[granularity] / 2
        )
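        # For example, with TRAINING_MONTHS set to 7, pushes from the last
        # 7 + floor(7 / 2) = 10 months are retrieved.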

        # We use the actual date instead of 'today-X' aliases to avoid adr caching
        # this query.
        from_date = datetime.utcnow() - relativedelta(months=from_months)
        to_date = datetime.utcnow() - relativedelta(days=3)

        pushes = mozci.push.make_push_objects(
            from_date=from_date.strftime("%Y-%m-%d"),
            to_date=to_date.strftime("%Y-%m-%d"),
            branch="autoland",
        )

        if granularity == "label":
            push_data_db = test_scheduling.PUSH_DATA_LABEL_DB
        elif granularity == "group":
            push_data_db = test_scheduling.PUSH_DATA_GROUP_DB

        cache: Dict[mozci.push.Push, Tuple[PushResult, int]] = {}

        def cache_key(push: mozci.push.Push) -> str:
            return f"push_data.{granularity}.{push.rev}"
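        # The cache key includes the granularity, so label and group push data
        # are cached independently of each other.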

        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_push = {
                executor.submit(
                    lambda push: adr.config.cache.get(cache_key(push)), push
                ): push
                for push in pushes
            }

            for future in tqdm(
                concurrent.futures.as_completed(future_to_push),
                total=len(future_to_push),
            ):
                push = future_to_push[future]

                exc = future.exception()
                if exc is not None:
                    logger.info(f"Exception {exc} while getting {push.rev}")
                    for f in future_to_push.keys():
                        f.cancel()

                cache[push] = future.result()

        # Regenerating a large amount of data when we update the mozci regression detection
        # algorithm is currently pretty slow, so we only regenerate 1000 pushes whenever we
        # run.
        """to_regenerate = 0
        for push in pushes[::-1]:
            cached = cache[push]
            if not cached:
                continue

            value, mozci_version = cached
            if mozci_version != MOZCI_VERSION and to_regenerate < 1000:
                cache[push] = None
                to_regenerate += 1"""

        to_regenerate = 0
        for push in pushes[::-1]:
            cached = cache[push]
            if not cached:
                continue

            if to_regenerate < 1000:
                del cache[push]
                adr.config.cache.put(push.push_uuid, {}, 0)
                to_regenerate += 1
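        # The local cache entry is dropped and the adr entry keyed by the push's
        # uuid is overwritten with an empty value and zero retention, presumably
        # so that the push gets re-analyzed from scratch on this run.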

        def generate() -> Generator[PushResult, None, None]:
            num_cached = 0

            for push in tqdm(pushes):
                key = cache_key(push)

                if push in cache and cache[push] is not None:
                    num_cached += 1
                    cached = cache[push]
                    if cached:
                        value, mozci_version = cached
                        yield value
                else:
                    logger.info(f"Analyzing {push.rev} at the {granularity} level...")

                    try:
                        if granularity == "label":
                            runnables = push.task_labels
                        elif granularity == "group":
                            runnables = push.group_summaries.keys()

                        value = (
                            push.revs,
                            list(runnables),
                            list(push.get_possible_regressions(granularity)),
                            list(push.get_likely_regressions(granularity)),
                        )
                        adr.config.cache.put(
                            key,
                            (value, MOZCI_VERSION),
                            adr.config["cache"]["retention"],
                        )
                        yield value
                    except adr.errors.MissingDataError:
                        logger.warning(
                            f"Tasks for push {push.rev} can't be found on ActiveData"
                        )
                        adr.config.cache.put(key, (), MISSING_CACHE_RETENTION)
                    except Exception:
                        traceback.print_exc()
                        adr.config.cache.put(key, (), MISSING_CACHE_RETENTION)

            logger.info(f"{num_cached} pushes were already cached out of {len(pushes)}")

        db.write(push_data_db, generate())
        zstd_compress(push_data_db)

    def retrieve_push_data(self) -> None:
        self.generate_push_data("label")
        self.generate_push_data("group")

    def generate_test_scheduling_history(self, granularity):
        # Get the commits DB.
        assert db.download(repository.COMMITS_DB)

        HISTORY_DATE_START = datetime.now() - relativedelta(
            months=TRAINING_MONTHS[granularity]
        )

        if granularity == "label":
            push_data_db = test_scheduling.PUSH_DATA_LABEL_DB
            test_scheduling_db = test_scheduling.TEST_LABEL_SCHEDULING_DB
            past_failures_db = os.path.join(
                "data", test_scheduling.PAST_FAILURES_LABEL_DB
            )
            failing_together_db = os.path.join(
                "data", test_scheduling.FAILING_TOGETHER_LABEL_DB
            )
        elif granularity == "group":
            push_data_db = test_scheduling.PUSH_DATA_GROUP_DB
            test_scheduling_db = test_scheduling.TEST_GROUP_SCHEDULING_DB
            past_failures_db = os.path.join(
                "data", test_scheduling.PAST_FAILURES_GROUP_DB
            )
            touched_together_db = os.path.join(
                "data", test_scheduling.TOUCHED_TOGETHER_DB
            )

        assert db.download(push_data_db)

        db.download(test_scheduling_db, support_files_too=True)

        last_node = None
        for revs, _ in test_scheduling.get_test_scheduling_history(granularity):
            last_node = revs[0]

        def generate_failing_together_probabilities(push_data):
            # TODO: we should consider the probabilities of `task1 failure -> task2 failure` and
            # `task2 failure -> task1 failure` separately, as they could be different.

            count_runs = collections.Counter()
            count_single_failures = collections.Counter()
            count_both_failures = collections.Counter()

            for revisions, tasks, likely_regressions, candidate_regressions in tqdm(
                push_data
            ):
                failures = set(likely_regressions + candidate_regressions)
                all_tasks = list(set(tasks) | failures)

                for task1, task2 in itertools.combinations(sorted(all_tasks), 2):
                    count_runs[(task1, task2)] += 1

                    if task1 in failures:
                        if task2 in failures:
                            count_both_failures[(task1, task2)] += 1
                        else:
                            count_single_failures[(task1, task2)] += 1
                    elif task2 in failures:
                        count_single_failures[(task1, task2)] += 1

            stats = {}

            skipped = 0
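            # For each pair of tasks, support is the fraction of shared runs in
            # which both failed, and confidence is the fraction of runs with at
            # least one failure in which both failed. For example, a pair that ran
            # together 1400 times, failed together 4 times and failed separately
            # once has support 4 / 1400 ≈ 0.003 and confidence 4 / (1 + 4) = 0.8.
            # Pairs whose support is below 1 / 700 are skipped.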

            for couple, run_count in count_runs.most_common():
                failure_count = count_both_failures[couple]
                support = failure_count / run_count

                if support < 1 / 700:
                    skipped += 1
                    continue

                if failure_count != 0:
                    confidence = failure_count / (
                        count_single_failures[couple] + failure_count
                    )
                else:
                    confidence = 0.0

                stats[couple] = (support, confidence)

            logger.info(f"{skipped} couples skipped because their support was too low")

            logger.info("Redundancies with the highest support and confidence:")
            for couple, (support, confidence) in sorted(
                stats.items(), key=lambda k: (-k[1][1], -k[1][0])
            )[:7]:
                failure_count = count_both_failures[couple]
                run_count = count_runs[couple]
                logger.info(
                    f"{couple[0]} - {couple[1]} redundancy confidence {confidence}, support {support} ({failure_count} over {run_count})."
                )

            logger.info("Redundancies with the highest confidence and lowest support:")
            for couple, (support, confidence) in sorted(
                stats.items(), key=lambda k: (-k[1][1], k[1][0])
            )[:7]:
                failure_count = count_both_failures[couple]
                run_count = count_runs[couple]
                logger.info(
                    f"{couple[0]} - {couple[1]} redundancy confidence {confidence}, support {support} ({failure_count} over {run_count})."
                )

            failing_together = test_scheduling.get_failing_together_db()
            count_redundancies = collections.Counter()
            for couple, (support, confidence) in stats.items():
                if confidence == 1.0:
                    count_redundancies["==100%"] += 1
                if confidence > 0.9:
                    count_redundancies[">=90%"] += 1
                if confidence > 0.8:
                    count_redundancies[">=80%"] += 1
                if confidence > 0.7:
                    count_redundancies[">=70%"] += 1

                if confidence < 0.7:
                    continue

                failing_together[
                    f"{couple[0]}${couple[1]}".encode("utf-8")
                ] = struct.pack("ff", support, confidence)
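                # Only pairs with confidence >= 0.7 are stored; the key is the two
                # task names joined by "$" (in sorted order, as produced by
                # itertools.combinations above) and the value packs support and
                # confidence as two 32-bit floats.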

            for percentage, count in count_redundancies.most_common():
                logger.info(f"{count} with {percentage} confidence")

            test_scheduling.close_failing_together_db()

        def generate_all_data():
            past_failures = test_scheduling.get_past_failures(granularity)

            push_num = past_failures["push_num"] if "push_num" in past_failures else 0

            # We can start once we get to the last revision we added in the previous run.
            can_start = True if last_node is None else False

            commit_map = {}
            for commit_data in tqdm(repository.get_commits()):
                if not can_start:
                    if last_node == commit_data["node"]:
                        can_start = True

                    continue

                commit_map[commit_data["node"]] = commit_data

            push_data = list(db.read(push_data_db))

            logger.info(f"push data nodes: {len(push_data)}")

            if granularity == "label":
                push_data = [
                    (
                        revisions,
                        rename_tasks(push_tasks),
                        rename_tasks(possible_regressions),
                        rename_tasks(likely_regressions),
                    )
                    for revisions, push_tasks, possible_regressions, likely_regressions in push_data
                ]

            # In the last 14 pushes, we definitely run all possible runnables.
            all_runnables_set = set(
                sum((push_runnables for _, push_runnables, _, _ in push_data[-14:]), [])
            )
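            # sum() with a list as the start value flattens the per-push runnable
            # lists from the last 14 pushes into a single list before deduplication.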
            # Filter runnables we don't need.
            all_runnables = filter_runnables(
                list(all_runnables_set), all_runnables_set, granularity
            )
            all_runnables_set = set(all_runnables_set)
            logger.info(f"{len(all_runnables_set)} runnables run in the last 14 pushes")

            push_data = [
                (
                    revisions,
                    filter_runnables(push_tasks, all_runnables_set, granularity),
                    filter_runnables(
                        possible_regressions, all_runnables_set, granularity
                    ),
                    filter_runnables(
                        likely_regressions, all_runnables_set, granularity
                    ),
                )
                for revisions, push_tasks, possible_regressions, likely_regressions in push_data
            ]

            if granularity == "label":
                generate_failing_together_probabilities(push_data)

            # Store all runnables in the past_failures DB so it can be used in the evaluation phase.
            past_failures["all_runnables"] = all_runnables
            # XXX: Should we recreate the DB from scratch if the previous all_runnables are not the
            # same as the current ones?

            saved_nodes = set()
            skipped_no_commits = 0
            skipped_too_big_commits = 0
            skipped_no_runnables = 0

            # We can start once we get to the last revision we added in the previous run.
            can_start = True if last_node is None else False

            if granularity == "group":
                update_touched_together_gen = test_scheduling.update_touched_together()
                next(update_touched_together_gen)
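                # update_touched_together() is assumed to be a coroutine-style
                # generator: the initial next() primes it so it can later receive
                # commit nodes via send().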

            for i in tqdm(range(len(push_data))):
                (
                    revisions,
                    push_runnables,
                    possible_regressions,
                    likely_regressions,
                ) = push_data.pop(0)

                if not can_start:
                    if last_node == revisions[0]:
                        can_start = True

                    continue

                push_num += 1

                # XXX: Some commits are skipped in the repository mining, e.g. merges and backouts. Maybe we should not skip them.
                commits = tuple(
                    commit_map.pop(revision)
                    for revision in revisions
                    if revision in commit_map
                )
                if len(commits) == 0:
                    skipped_no_commits += 1
                    continue

                merged_commits = commit_features.merge_commits(commits)

                # XXX: For now, skip commits which are too large.
                # In the future we can either:
                # - Improve shelve perf and go back to consider all files;
                # - Consider only files which appear with a given frequency, like the "files" feature in commit_features;
                # - Keep a limit of number of files.
                if len(merged_commits["files"]) > 50:
                    skipped_too_big_commits += 1
                    continue

                # If we considered all_runnables, we'd generate a huge amount of data.
                # We consider only the runnables which run in this push, and the possible and likely regressions
                # from this push. We can't consider all runnables because we can't be sure that a task that didn't
                # run on a push would have been successful.
                runnables_to_consider = list(
                    set(push_runnables + possible_regressions + likely_regressions)
                )

                if len(runnables_to_consider) == 0:
                    skipped_no_runnables += 1
                    continue

                # Sync DB every 250 pushes, so we cleanup the shelve cache (we'd run OOM otherwise!).
                if i % 250 == 0:
                    past_failures.sync()
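                    # Assuming past_failures is a shelve opened with writeback=True,
                    # sync() writes back the entries accumulated in memory and empties
                    # the cache, keeping memory usage bounded.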

                pushdate = dateutil.parser.parse(merged_commits["pushdate"])

                if granularity == "group":
                    update_touched_together_gen.send(commits[0]["node"])

                result = {
                    "revs": revisions,
                    "data": [],
                }
                for data in test_scheduling.generate_data(
                    past_failures,
                    merged_commits,
                    push_num,
                    runnables_to_consider,
                    possible_regressions,
                    likely_regressions,
                ):
                    if pushdate > HISTORY_DATE_START:
                        result["data"].append(data)

                if pushdate > HISTORY_DATE_START:
                    saved_nodes.add(i)
                    yield result

            if granularity == "group":
                try:
                    update_touched_together_gen.send(None)
                except StopIteration:
                    pass

            logger.info(f"saved push data nodes: {len(saved_nodes)}")
            logger.info(f"skipped {skipped_no_commits} (no commits in our DB)")
            logger.info(f"skipped {skipped_too_big_commits} (too big commits)")
            logger.info(f"skipped {skipped_no_runnables} (no interesting runnables)")

            past_failures["push_num"] = push_num
            past_failures.close()

        db.append(test_scheduling_db, generate_all_data())

        zstd_compress(test_scheduling_db)
        create_tar_zst(past_failures_db)

        if granularity == "group":
            create_tar_zst(touched_together_db)

        if granularity == "label":
            create_tar_zst(failing_together_db)


def main():
    description = "Retrieve and extract the test scheduling history from ActiveData"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        "op", help="Which operation to perform.", choices=["retrieve", "generate"]
    )
    parser.add_argument(
        "--granularity",
        help="Which test granularity to use.",
        choices=["label", "group"],
    )
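    # Example invocations (the script filename is assumed here for illustration):
    #   python test_scheduling_history_retriever.py retrieve
    #   python test_scheduling_history_retriever.py generate --granularity label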

    args = parser.parse_args()

    retriever = Retriever()
    if args.op == "retrieve":
        retriever.retrieve_push_data()
    elif args.op == "generate":
        assert args.granularity is not None
        retriever.generate_test_scheduling_history(args.granularity)


if __name__ == "__main__":
    main()