Mirror of https://github.com/mozilla/treeherder.git
Apply black formatting to the whole repo (#6332)
* Automatic black fixes
* Add conflicting rule exceptions between black and flake8
Parent: d01a99bd2f
Commit: 368c112266
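For context on the hunks below: black rewrites every Python file into one deterministic style, so almost all of this diff is mechanical re-wrapping. A minimal sketch of the transformation using black's Python API — illustrative only, not part of the commit; it assumes `pip install black` and mirrors the repo's max-line-length of 100:

    import black

    # One of the over-long calls touched in the first hunk below.
    src = (
        'print("The following queues were in the Procfile, but not in the '
        'settings file:\\n%s\\n" % "\\n".join(missing_procfile))\n'
    )

    # format_str() applies the same rewrite that `black .` applies on disk.
    print(black.format_str(src, mode=black.FileMode(line_length=100)))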
@@ -36,8 +36,14 @@ if settings_queues != procfile_queues:
     print("ERROR - mismatches found")
     missing_procfile = procfile_queues - settings_queues
     if missing_procfile:
-        print("The following queues were in the Procfile, but not in the settings file:\n%s\n" % "\n".join(missing_procfile))
+        print(
+            "The following queues were in the Procfile, but not in the settings file:\n%s\n"
+            % "\n".join(missing_procfile)
+        )
     missing_settings = settings_queues - procfile_queues
     if missing_settings:
-        print("The following queues were in the settings, but not in the Procfile:\n%s\n" % "\n".join(missing_settings))
+        print(
+            "The following queues were in the settings, but not in the Procfile:\n%s\n"
+            % "\n".join(missing_settings)
+        )
     sys.exit(1)

@@ -21,7 +21,9 @@ warnings.filterwarnings('ignore', category=DeprecationWarning, module='markdown.
 # jinja2/runtime.py -> https://github.com/pallets/jinja/pull/867
 # orderedmultidict/orderedmultidict.py -> https://github.com/gruns/orderedmultidict/pull/20
 # promise/promise_list.py -> https://github.com/syrusakbary/promise/pull/67
-warnings.filterwarnings('ignore', category=DeprecationWarning, message=r'Using or importing the ABCs .*')
+warnings.filterwarnings(
+    'ignore', category=DeprecationWarning, message=r'Using or importing the ABCs .*'
+)

 # "the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses"
 warnings.filterwarnings('ignore', category=DeprecationWarning, module='celery.utils.imports')

@@ -16,7 +16,7 @@ logger.setLevel(logging.INFO)
 HOSTS = {
     "localhost": "http://localhost:8000",
     "stage": "https://treeherder.allizom.org",
-    "production": "https://treeherder.mozilla.org"
+    "production": "https://treeherder.mozilla.org",
 }

@@ -46,24 +46,28 @@ def main(args):
         difference = DeepDiff(pushes[index], production_pushes[index])
         if difference:
             logger.info(difference.to_json())
-            logger.info("{}/#/jobs?repo={}&revision={}".format(
-                compare_to_client.server_url,
-                _project,
-                pushes[index]["revision"]))
-            logger.info("{}/#/jobs?repo={}&revision={}".format(
-                production_client.server_url,
-                _project,
-                production_pushes[index]["revision"]))
+            logger.info(
+                "{}/#/jobs?repo={}&revision={}".format(
+                    compare_to_client.server_url, _project, pushes[index]["revision"]
+                )
+            )
+            logger.info(
+                "{}/#/jobs?repo={}&revision={}".format(
+                    production_client.server_url, _project, production_pushes[index]["revision"]
+                )
+            )


 def get_args():
-    parser = argparse.ArgumentParser("Compare a push from a Treeherder instance to the production instance.")
-    parser.add_argument("--host",
-                        default="stage",
-                        help="Host to compare. It defaults to stage")
-    parser.add_argument("--projects",
-                        default="android-components,fenix",
-                        help="Projects (comma separated) to compare. It defaults to android-components & fenix")
+    parser = argparse.ArgumentParser(
+        "Compare a push from a Treeherder instance to the production instance."
+    )
+    parser.add_argument("--host", default="stage", help="Host to compare. It defaults to stage")
+    parser.add_argument(
+        "--projects",
+        default="android-components,fenix",
+        help="Projects (comma separated) to compare. It defaults to android-components & fenix",
+    )

     args = parser.parse_args()
     return args

@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
 HOSTS = {
     "localhost": "http://localhost:8000",
     "stage": "https://treeherder.allizom.org",
-    "production": "https://treeherder.mozilla.org"
+    "production": "https://treeherder.mozilla.org",
 }

@@ -58,19 +58,26 @@ def print_url_to_taskcluster(job_guid):


 if __name__ == "__main__":
-    parser = argparse.ArgumentParser("Compare a push from a Treeherder instance to the production instance.")
-    parser.add_argument("--host", default="localhost",
-                        help="Host to compare. It defaults to localhost")
-    parser.add_argument("--revision", required=True,
-                        help="Revision to compare")
-    parser.add_argument("--project", default="mozilla-central",
-                        help="Project to compare. It defaults to mozilla-central")
+    parser = argparse.ArgumentParser(
+        "Compare a push from a Treeherder instance to the production instance."
+    )
+    parser.add_argument(
+        "--host", default="localhost", help="Host to compare. It defaults to localhost"
+    )
+    parser.add_argument("--revision", required=True, help="Revision to compare")
+    parser.add_argument(
+        "--project",
+        default="mozilla-central",
+        help="Project to compare. It defaults to mozilla-central",
+    )

     args = parser.parse_args()

     th_instance = TreeherderClient(server_url=HOSTS[args.host])
     th_instance_pushid = th_instance.get_pushes(args.project, revision=args.revision)[0]["id"]
-    th_instance_jobs = th_instance.get_jobs(args.project, push_id=th_instance_pushid, count=None) or []
+    th_instance_jobs = (
+        th_instance.get_jobs(args.project, push_id=th_instance_pushid, count=None) or []
+    )

     production = TreeherderClient(server_url=HOSTS["production"])
     production_pushid = production.get_pushes(args.project, revision=args.revision)[0]["id"]
@@ -103,7 +110,9 @@ if __name__ == "__main__":
     logger.info("We have found: %s jobs on the production instance.", len(production_jobs))

     if production_dict:
-        logger.info("There are the first 10 production jobs we do not have th_instancely. Follow the link to investigate.")
+        logger.info(
+            "There are the first 10 production jobs we do not have th_instancely. Follow the link to investigate."
+        )
         for job in list(production_dict.values())[0:10]:
             print_url_to_taskcluster(job["job_guid"])

@@ -1,8 +1,10 @@
 [flake8]
 exclude = */.*/,.*/,__pycache__,node_modules
 # E129: visually indented line with same indent as next logical line
+# E203 whitespace before ':'
+# E231: missing whitespace after ','
 # E501: line too long
-extend_ignore = E129,E501
+extend_ignore = E129,E203,E231,E501
 max-line-length = 100

 [tool:pytest]
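The two codes added to extend_ignore are the standard black/flake8 conflicts: black sometimes emits whitespace that pycodestyle's E203 and E231 checks reject. A small illustrative sketch of black-styled code that would otherwise be flagged (assuming stock pycodestyle semantics; not taken from the repo):

    # E203 (whitespace before ':'): black pads the slice colon symmetrically
    # when a bound is a complex expression.
    chunk = items[offset + 1 : limit]

    # E231 (missing whitespace after ',' / ':'): some pycodestyle releases
    # flag black's compact simple slices, which omit the space after ':'.
    head = items[1:9]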
@@ -1,16 +1,14 @@
 from treeherder.autoclassify.autoclassify import match_errors
-from treeherder.autoclassify.matchers import (crash_signature_matcher,
-                                              precise_matcher)
-from treeherder.model.models import (BugJobMap,
-                                     ClassifiedFailure,
-                                     JobNote,
-                                     TextLogError,
-                                     TextLogErrorMatch)
+from treeherder.autoclassify.matchers import crash_signature_matcher, precise_matcher
+from treeherder.model.models import (
+    BugJobMap,
+    ClassifiedFailure,
+    JobNote,
+    TextLogError,
+    TextLogErrorMatch,
+)

-from .utils import (crash_line,
-                    create_lines,
-                    log_line,
-                    test_line)
+from .utils import crash_line, create_lines, log_line, test_line


 def do_autoclassify(job, test_failure_lines, matchers, status="testfailed"):
@@ -24,18 +22,18 @@ def do_autoclassify(job, test_failure_lines, matchers, status="testfailed"):
         item.refresh_from_db()


-def test_classify_test_failure(text_log_errors_failure_lines,
-                               classified_failures,
-                               test_job_2):
+def test_classify_test_failure(text_log_errors_failure_lines, classified_failures, test_job_2):
     # Ensure that running autoclassify on a new job classifies lines that
     # exactly match previous classifications

     # The first two lines match classified failures created in the fixtures
-    lines = [(test_line, {}),
-             (test_line, {"subtest": "subtest2"}),
-             (test_line, {"status": "TIMEOUT"}),
-             (test_line, {"expected": "ERROR"}),
-             (test_line, {"message": "message2"})]
+    lines = [
+        (test_line, {}),
+        (test_line, {"subtest": "subtest2"}),
+        (test_line, {"status": "TIMEOUT"}),
+        (test_line, {"expected": "ERROR"}),
+        (test_line, {"message": "message2"}),
+    ]
     test_error_lines, test_failure_lines = create_lines(test_job_2, lines)

     do_autoclassify(test_job_2, test_failure_lines, [precise_matcher])
@@ -43,25 +41,28 @@ def test_classify_test_failure(text_log_errors_failure_lines,
     expected_classified = test_error_lines[:2], test_failure_lines[:2]
     expected_unclassified = test_error_lines[2:], test_failure_lines[2:]

-    for (error_line, failure_line), expected in zip(zip(*expected_classified),
-                                                    classified_failures):
+    for (error_line, failure_line), expected in zip(zip(*expected_classified), classified_failures):
         assert list(error_line.classified_failures.values_list('id', flat=True)) == [expected.id]
-        assert list(failure_line.error.classified_failures.values_list('id', flat=True)) == [expected.id]
+        assert list(failure_line.error.classified_failures.values_list('id', flat=True)) == [
+            expected.id
+        ]

     for error_line, failure_line in zip(*expected_unclassified):
         assert error_line.classified_failures.count() == 0
         assert failure_line.error.classified_failures.count() == 0


-def test_no_autoclassify_job_success(text_log_errors_failure_lines,
-                                     classified_failures,
-                                     test_job_2):
+def test_no_autoclassify_job_success(
+    text_log_errors_failure_lines, classified_failures, test_job_2
+):
     # Ensure autoclassification doesn't occur for successful jobs
-    lines = [(test_line, {}),
-             (test_line, {"subtest": "subtest2"}),
-             (test_line, {"status": "TIMEOUT"}),
-             (test_line, {"expected": "ERROR"}),
-             (test_line, {"message": "message2"})]
+    lines = [
+        (test_line, {}),
+        (test_line, {"subtest": "subtest2"}),
+        (test_line, {"status": "TIMEOUT"}),
+        (test_line, {"expected": "ERROR"}),
+        (test_line, {"message": "message2"}),
+    ]
     test_error_lines, test_failure_lines = create_lines(test_job_2, lines)

     do_autoclassify(test_job_2, test_failure_lines, [precise_matcher], status="success")
@@ -69,10 +70,11 @@ def test_no_autoclassify_job_success(text_log_errors_failure_lines,
     expected_classified = [], []
     expected_unclassified = test_error_lines, test_failure_lines

-    for (error_line, failure_line), expected in zip(zip(*expected_classified),
-                                                    classified_failures):
+    for (error_line, failure_line), expected in zip(zip(*expected_classified), classified_failures):
         assert list(error_line.classified_failures.values_list('id', flat=True)) == [expected.id]
-        assert list(failure_line.error.classified_failures.values_list('id', flat=True)) == [expected.id]
+        assert list(failure_line.error.classified_failures.values_list('id', flat=True)) == [
+            expected.id
+        ]

     for error_line, failure_line in zip(*expected_unclassified):
         assert error_line.classified_failures.count() == 0
@@ -95,38 +97,32 @@ def test_autoclassify_update_job_classification(failure_lines, classified_failur
     assert BugJobMap.objects.filter(job=test_job_2).count() == 0


-def test_autoclassify_no_update_job_classification(test_job, test_job_2,
-                                                   text_log_errors_failure_lines,
-                                                   classified_failures):
+def test_autoclassify_no_update_job_classification(
+    test_job, test_job_2, text_log_errors_failure_lines, classified_failures
+):

     lines = [(test_line, {})]
     test_error_lines, test_failure_lines = create_lines(test_job_2, lines)
-    TextLogError.objects.create(step=test_error_lines[0].step,
-                                line="Some error that isn't in the structured logs",
-                                line_number=2)
+    TextLogError.objects.create(
+        step=test_error_lines[0].step,
+        line="Some error that isn't in the structured logs",
+        line_number=2,
+    )

     do_autoclassify(test_job_2, test_failure_lines, [precise_matcher])

     assert JobNote.objects.filter(job=test_job_2).count() == 0


-def test_autoclassified_after_manual_classification(test_user,
-                                                    test_job_2,
-                                                    text_log_errors_failure_lines,
-                                                    failure_classifications, bugs):
+def test_autoclassified_after_manual_classification(
+    test_user, test_job_2, text_log_errors_failure_lines, failure_classifications, bugs
+):
     lines = [(test_line, {})]
     test_error_lines, test_failure_lines = create_lines(test_job_2, lines)
     bug = bugs.first()

-    BugJobMap.create(
-        job_id=test_job_2.id,
-        bug_id=bug.id,
-        user=test_user
-    )
-    JobNote.objects.create(job=test_job_2,
-                           failure_classification_id=4,
-                           user=test_user,
-                           text="")
+    BugJobMap.create(job_id=test_job_2.id, bug_id=bug.id, user=test_user)
+    JobNote.objects.create(job=test_job_2, failure_classification_id=4, user=test_user, text="")

     for error_line, failure_line in zip(test_error_lines, test_failure_lines):
         error_line.refresh_from_db()
@@ -145,17 +141,14 @@ def test_autoclassified_after_manual_classification(test_user,
     assert fl1.text_log_error_metadata.best_is_verified


-def test_autoclassified_no_update_after_manual_classification_1(test_job_2,
-                                                                test_user,
-                                                                failure_classifications):
+def test_autoclassified_no_update_after_manual_classification_1(
+    test_job_2, test_user, failure_classifications
+):
     # Line type won't be detected by the matchers we have registered
     lines = [(log_line, {})]
     test_error_lines, test_failure_lines = create_lines(test_job_2, lines)

-    JobNote.objects.create(job=test_job_2,
-                           failure_classification_id=4,
-                           user=test_user,
-                           text="")
+    JobNote.objects.create(job=test_job_2, failure_classification_id=4, user=test_user, text="")

     for error_line, failure_line in zip(test_error_lines, test_failure_lines):
         error_line.refresh_from_db()
@@ -165,17 +158,15 @@ def test_autoclassified_no_update_after_manual_classification_1(test_job_2,
     assert not test_failure_lines[0].error.matches.all().exists()


-def test_autoclassified_no_update_after_manual_classification_2(test_user, test_job_2,
-                                                                failure_classifications):
+def test_autoclassified_no_update_after_manual_classification_2(
+    test_user, test_job_2, failure_classifications
+):
     # Too many failure lines
-    _, test_failure_lines = create_lines(test_job_2,
-                                         [(log_line, {}),
-                                          (test_line, {"subtest": "subtest2"})])
+    _, test_failure_lines = create_lines(
+        test_job_2, [(log_line, {}), (test_line, {"subtest": "subtest2"})]
+    )

-    JobNote.objects.create(job=test_job_2,
-                           failure_classification_id=4,
-                           user=test_user,
-                           text="")
+    JobNote.objects.create(job=test_job_2, failure_classification_id=4, user=test_user, text="")

     for item in test_failure_lines:
         item.refresh_from_db()
@@ -183,18 +174,16 @@ def test_autoclassified_no_update_after_manual_classification_2(test_user, test_
     assert not test_failure_lines[0].error.matches.all().exists()


-def test_classify_skip_ignore(test_job_2,
-                              text_log_errors_failure_lines,
-                              classified_failures):
+def test_classify_skip_ignore(test_job_2, text_log_errors_failure_lines, classified_failures):

     text_log_errors, failure_lines = text_log_errors_failure_lines
     text_log_errors[1].metadata.best_is_verified = True
     text_log_errors[1].metadata.best_classification = None
     text_log_errors[1].metadata.save()

-    _, test_failure_lines = create_lines(test_job_2,
-                                         [(test_line, {}),
-                                          (test_line, {"subtest": "subtest2"})])
+    _, test_failure_lines = create_lines(
+        test_job_2, [(test_line, {}), (test_line, {"subtest": "subtest2"})]
+    )

     do_autoclassify(test_job_2, test_failure_lines, [precise_matcher])

@@ -209,9 +198,9 @@ def test_classify_skip_ignore(test_job_2,


 def test_classify_multiple(test_job_2, failure_lines, classified_failures):
-    _, test_failure_lines = create_lines(test_job_2,
-                                         [(test_line, {}),
-                                          (test_line, {"message": "message 1.2"})])
+    _, test_failure_lines = create_lines(
+        test_job_2, [(test_line, {}), (test_line, {"message": "message 1.2"})]
+    )

     expected_classified_precise = [test_failure_lines[0]]

@@ -223,27 +212,34 @@ def test_classify_multiple(test_job_2, failure_lines, classified_failures):


 def test_classify_crash(test_repository, test_job, test_job_2, test_matcher):
-    error_lines_ref, failure_lines_ref = create_lines(test_job,
-                                                      [(crash_line, {})])
+    error_lines_ref, failure_lines_ref = create_lines(test_job, [(crash_line, {})])

-    _, failure_lines = create_lines(test_job_2,
-                                    [(crash_line, {}),
-                                     (crash_line, {"test": "test1"}),
-                                     (crash_line, {"signature": "signature1"}),
-                                     (crash_line, {"signature": None})])
+    _, failure_lines = create_lines(
+        test_job_2,
+        [
+            (crash_line, {}),
+            (crash_line, {"test": "test1"}),
+            (crash_line, {"signature": "signature1"}),
+            (crash_line, {"signature": None}),
+        ],
+    )

     classified_failure = ClassifiedFailure.objects.create()
-    TextLogErrorMatch.objects.create(text_log_error=error_lines_ref[0],
-                                     classified_failure=classified_failure,
-                                     matcher_name=test_matcher.__class__.__name__,
-                                     score=1.0)
+    TextLogErrorMatch.objects.create(
+        text_log_error=error_lines_ref[0],
+        classified_failure=classified_failure,
+        matcher_name=test_matcher.__class__.__name__,
+        score=1.0,
+    )
     do_autoclassify(test_job_2, failure_lines, [crash_signature_matcher])

     expected_classified = failure_lines[0:2]
     expected_unclassified = failure_lines[2:]

     for actual in expected_classified:
-        assert list(actual.error.classified_failures.values_list('id', flat=True)) == [classified_failure.id]
+        assert list(actual.error.classified_failures.values_list('id', flat=True)) == [
+            classified_failure.id
+        ]

     for item in expected_unclassified:
         assert item.error.classified_failures.count() == 0

@@ -4,12 +4,9 @@ from first import first

 from treeherder.autoclassify.matchers import precise_matcher
 from treeherder.autoclassify.utils import score_matches
-from treeherder.model.models import (FailureLine,
-                                     TextLogErrorMatch,
-                                     TextLogErrorMetadata)
+from treeherder.model.models import FailureLine, TextLogErrorMatch, TextLogErrorMetadata

-from .utils import (create_failure_lines,
-                    create_text_log_errors)
+from .utils import create_failure_lines, create_text_log_errors


 def test_precise_matcher_with_matches(classified_failures):

@@ -7,7 +7,7 @@ def test_time_boxed_enough_budget():
     an_iterable = range(3)

     def quick_sleep(x):
-        time.sleep(.1)
+        time.sleep(0.1)
         return x

     items = list(time_boxed(quick_sleep, an_iterable, time_budget=5000))

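This one-character hunk is black's numeric-literal normalization: a bare float like `.1` gains an explicit leading zero. A tiny illustrative check, under the same assumed API as the sketch at the top of this page:

    import black

    print(black.format_str("time.sleep(.1)\n", mode=black.FileMode()), end="")
    # -> time.sleep(0.1)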
@@ -2,15 +2,23 @@ import datetime

 from mozlog.formatters.tbplformatter import TbplFormatter

-from treeherder.model.models import (FailureLine,
-                                     Job,
-                                     JobLog,
-                                     TextLogError,
-                                     TextLogErrorMetadata,
-                                     TextLogStep)
+from treeherder.model.models import (
+    FailureLine,
+    Job,
+    JobLog,
+    TextLogError,
+    TextLogErrorMetadata,
+    TextLogStep,
+)

-test_line = {"action": "test_result", "test": "test1", "subtest": "subtest1",
-             "status": "FAIL", "expected": "PASS", "message": "message1"}
+test_line = {
+    "action": "test_result",
+    "test": "test1",
+    "subtest": "subtest1",
+    "status": "FAIL",
+    "expected": "PASS",
+    "message": "message1",
+}
 log_line = {"action": "log", "level": "ERROR", "message": "message1"}
 crash_line = {"action": "crash", "signature": "signature", "test": "test1"}
 group_line = {"action": "test_groups"}
@@ -21,8 +29,7 @@ def create_lines(test_job, lines):
     failure_lines = create_failure_lines(test_job, lines)

     for error_line, failure_line in zip(error_lines, failure_lines):
-        TextLogErrorMetadata.objects.create(text_log_error=error_line,
-                                            failure_line=failure_line)
+        TextLogErrorMetadata.objects.create(text_log_error=error_line, failure_line=failure_line)

     test_job.autoclassify_status = Job.CROSSREFERENCED
     test_job.save()
@@ -30,13 +37,10 @@ def create_lines(test_job, lines):
     return error_lines, failure_lines


-def create_failure_lines(job, failure_line_list,
-                         start_line=0):
+def create_failure_lines(job, failure_line_list, start_line=0):
     failure_lines = []
     for i, (base_data, updates) in enumerate(failure_line_list[start_line:]):
-        data = {"job_guid": job.guid,
-                "repository": job.repository,
-                "line": i + start_line}
+        data = {"job_guid": job.guid, "repository": job.repository, "line": i + start_line}
         data.update(base_data)
         data.update(updates)
         failure_line = FailureLine(**data)
@@ -44,7 +48,7 @@ def create_failure_lines(job, failure_line_list,
             job=job,
             name='{}{}'.format(base_data.get('test'), job.id),
             url='bar{}'.format(i),
-            status=1
+            status=1,
         )
         print('create jobLog for job id: {}'.format(job.id))
         failure_line.job_log = job_log
@@ -82,7 +86,8 @@ def create_text_log_errors(job, failure_line_list):
         finished_line_number=10,
         started=datetime.datetime.now(),
         finished=datetime.datetime.now(),
-        result=TextLogStep.TEST_FAILED)
+        result=TextLogStep.TEST_FAILED,
+    )

     formatter = TbplFormatter()
     errors = []
@@ -90,9 +95,9 @@ def create_text_log_errors(job, failure_line_list):
         data = get_data(base_data, updates)
         if not data:
             continue
-        error = TextLogError.objects.create(step=step,
-                                            line=formatter(data).split("\n")[0],
-                                            line_number=i)
+        error = TextLogError.objects.create(
+            step=step, line=formatter(data).split("\n")[0], line_number=i
+        )
         errors.append(error)

     return errors

@@ -6,7 +6,6 @@ from treeherder.client.thclient import PerfherderClient


 class PerfherderClientTest(unittest.TestCase):
-
     @responses.activate
     def test_get_performance_signatures(self):
         pc = PerfherderClient()
@@ -14,34 +13,33 @@ class PerfherderClientTest(unittest.TestCase):
         content = {
             'signature1': {'cheezburgers': 1},
             'signature2': {'hamburgers': 2},
-            'signature3': {'cheezburgers': 2}
+            'signature3': {'cheezburgers': 2},
         }
         responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

         sigs = pc.get_performance_signatures('mozilla-central')
         self.assertEqual(len(sigs), 3)
-        self.assertEqual(sigs.get_signature_hashes(), ['signature1',
-                                                       'signature2',
-                                                       'signature3'])
-        self.assertEqual(sigs.get_property_names(),
-                         set(['cheezburgers', 'hamburgers']))
+        self.assertEqual(sigs.get_signature_hashes(), ['signature1', 'signature2', 'signature3'])
+        self.assertEqual(sigs.get_property_names(), set(['cheezburgers', 'hamburgers']))
         self.assertEqual(sigs.get_property_values('cheezburgers'), set([1, 2]))

     @responses.activate
     def test_get_performance_data(self):
         pc = PerfherderClient()

-        url = '{}?{}'.format(pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project='mozilla-central'),
-                             'signatures=signature1&signatures=signature2')
+        url = '{}?{}'.format(
+            pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project='mozilla-central'),
+            'signatures=signature1&signatures=signature2',
+        )
         content = {
             'signature1': [{'value': 1}, {'value': 2}],
-            'signature2': [{'value': 2}, {'value': 1}]
+            'signature2': [{'value': 2}, {'value': 1}],
         }
         responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

-        series_list = pc.get_performance_data('mozilla-central',
-                                              signatures=['signature1',
-                                                          'signature2'])
+        series_list = pc.get_performance_data(
+            'mozilla-central', signatures=['signature1', 'signature2']
+        )
         self.assertEqual(len(series_list), 2)
         self.assertEqual(series_list['signature1']['value'], [1, 2])
         self.assertEqual(series_list['signature2']['value'], [2, 1])

@@ -6,24 +6,16 @@ from treeherder.client.thclient import TreeherderClient


 class TreeherderClientTest(unittest.TestCase):
-    JOB_RESULTS = [{"jobDetail1": 1},
-                   {"jobDetail2": 2},
-                   {"jobDetail3": 3}
-                   ]
-    PUSHES = [{"push1": 1},
-              {"push2": 2},
-              {"push3": 3}
-              ]
+    JOB_RESULTS = [{"jobDetail1": 1}, {"jobDetail2": 2}, {"jobDetail3": 3}]
+    PUSHES = [{"push1": 1}, {"push2": 2}, {"push3": 3}]

     @responses.activate
     def test_get_job(self):
         tdc = TreeherderClient()
         url = tdc._get_endpoint_url(tdc.JOBS_ENDPOINT, project='mozilla-inbound')
         content = {
-            "meta": {"count": 3,
-                     "repository": "mozilla-inbound",
-                     "offset": 0},
-            "results": self.JOB_RESULTS
+            "meta": {"count": 3, "repository": "mozilla-inbound", "offset": 0},
+            "results": self.JOB_RESULTS,
         }
         responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

@@ -36,9 +28,8 @@ class TreeherderClientTest(unittest.TestCase):
         tdc = TreeherderClient()
         url = tdc._get_endpoint_url(tdc.PUSH_ENDPOINT, project='mozilla-inbound')
         content = {
-            "meta": {"count": 3, "repository": "mozilla-inbound",
-                     "offset": 0},
-            "results": self.PUSHES
+            "meta": {"count": 3, "repository": "mozilla-inbound", "offset": 0},
+            "results": self.PUSHES,
         }
         responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

@@ -14,19 +14,23 @@ from rest_framework.test import APIClient
 from treeherder.autoclassify.autoclassify import mark_best_classification
 from treeherder.etl.jobs import store_job_data
 from treeherder.etl.push import store_push_data
-from treeherder.model.models import (Commit,
-                                     JobNote,
-                                     Option,
-                                     OptionCollection,
-                                     Push,
-                                     TextLogErrorMetadata,
-                                     User)
-from treeherder.perf.models import (IssueTracker,
-                                    PerformanceAlert,
-                                    PerformanceAlertSummary,
-                                    PerformanceDatum,
-                                    PerformanceFramework,
-                                    PerformanceSignature)
+from treeherder.model.models import (
+    Commit,
+    JobNote,
+    Option,
+    OptionCollection,
+    Push,
+    TextLogErrorMetadata,
+    User,
+)
+from treeherder.perf.models import (
+    IssueTracker,
+    PerformanceAlert,
+    PerformanceAlertSummary,
+    PerformanceDatum,
+    PerformanceFramework,
+    PerformanceSignature,
+)
 from treeherder.services.pulse.exchange import get_exchange

 IS_WINDOWS = "windows" in platform.system().lower()
@@ -34,9 +38,7 @@ IS_WINDOWS = "windows" in platform.system().lower()

 def pytest_addoption(parser):
     parser.addoption(
-        "--runslow",
-        action="store_true",
-        help="run slow tests",
+        "--runslow", action="store_true", help="run slow tests",
     )


@@ -51,6 +53,7 @@ def pytest_runtest_setup(item):
         pytest.skip("need --runslow option to run")

     from django.core.cache import cache
+
     cache.clear()


@@ -61,6 +64,7 @@ def block_unmocked_requests():

     Helps avoid inadvertent dependencies on external resources during the test run.
     """
+
     def mocked_send(*args, **kwargs):
         raise RuntimeError('Tests must mock all HTTP requests!')

@@ -78,6 +82,7 @@ def block_unmocked_requests():
 def sample_data():
     """Returns a SampleData() object"""
     from .sampledata import SampleData
+
     return SampleData()


@@ -94,26 +99,26 @@ def sample_push(sample_data):
 @pytest.fixture(name='create_push')
 def fixture_create_push():
     """Return a function to create a push"""
-    def create(repository,
-               revision='4c45a777949168d16c03a4cba167678b7ab65f76',
-               author='foo@bar.com'):
-
+    def create(
+        repository, revision='4c45a777949168d16c03a4cba167678b7ab65f76', author='foo@bar.com'
+    ):
         return Push.objects.create(
-            repository=repository,
-            revision=revision,
-            author=author,
-            time=datetime.datetime.now())
+            repository=repository, revision=revision, author=author, time=datetime.datetime.now()
+        )

     return create


 @pytest.fixture(name='create_commit')
 def fixture_create_commit():
     """Return a function to create a commit"""
+
     def create(push, comments='Bug 12345 - This is a message'):
         return Commit.objects.create(
-            push=push,
-            revision=push.revision,
-            author=push.author,
-            comments=comments)
+            push=push, revision=push.revision, author=push.author, comments=comments
+        )

     return create

@@ -121,10 +126,7 @@ def fixture_create_commit():
 def test_repository(transactional_db):
     from treeherder.model.models import Repository, RepositoryGroup

-    RepositoryGroup.objects.create(
-        name="development",
-        description=""
-    )
+    RepositoryGroup.objects.create(name="development", description="")

     r = Repository.objects.create(
         dvcs_type="hg",
@@ -143,8 +145,7 @@ def test_repository(transactional_db):
 @pytest.fixture
 def test_issue_tracker(transactional_db):
     return IssueTracker.objects.create(
-        name="Bugzilla",
-        task_base_url="https://bugzilla.mozilla.org/show_bug.cgi?id="
+        name="Bugzilla", task_base_url="https://bugzilla.mozilla.org/show_bug.cgi?id="
     )


@@ -157,7 +158,8 @@ def test_repository_2(test_repository):
         name=test_repository.name + '_2',
         dvcs_type=test_repository.dvcs_type,
         url=test_repository.url + '_2',
-        codebase=test_repository.codebase)
+        codebase=test_repository.codebase,
+    )


 @pytest.fixture
@@ -178,16 +180,16 @@ def fixture_create_jobs(test_repository, failure_classifications):
     def create(jobs):
         store_job_data(test_repository, jobs)
         return [Job.objects.get(id=i) for i in range(1, len(jobs) + 1)]

     return create


 @pytest.fixture
 def test_job(eleven_job_blobs, create_jobs):
     job = eleven_job_blobs[0]
-    job['job'].update({
-        'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
-        'taskcluster_retry_id': '0'
-    })
+    job['job'].update(
+        {'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs', 'taskcluster_retry_id': '0'}
+    )
     return create_jobs([job])[0]

@@ -263,10 +265,12 @@ def test_job_with_notes(test_job, test_user):
     """test job with job notes."""

     for failure_classification_id in [2, 3]:
-        JobNote.objects.create(job=test_job,
-                               failure_classification_id=failure_classification_id,
-                               user=test_user,
-                               text="you look like a man-o-lantern")
+        JobNote.objects.create(
+            job=test_job,
+            failure_classification_id=failure_classification_id,
+            user=test_user,
+            text="you look like a man-o-lantern",
+        )

     test_job.refresh_from_db()

@@ -300,6 +304,7 @@ def pulse_connection():
 def pulse_exchange(pulse_connection, request):
+
     def build_exchange(name, create_exchange):
         return get_exchange(pulse_connection, name, create=create_exchange)

     return build_exchange

@@ -307,26 +312,32 @@ def pulse_exchange(pulse_connection, request):
 def failure_lines(test_job):
     from tests.autoclassify.utils import test_line, create_failure_lines

-    return create_failure_lines(test_job,
-                                [(test_line, {}),
-                                 (test_line, {"subtest": "subtest2"})])
+    return create_failure_lines(test_job, [(test_line, {}), (test_line, {"subtest": "subtest2"})])


 @pytest.fixture
 def failure_line_logs(test_job):
     from tests.autoclassify.utils import test_line, create_failure_lines

-    return create_failure_lines(test_job,
-                                [(test_line, {'action': 'log', 'test': None}),
-                                 (test_line, {'subtest': 'subtest2'})])
+    return create_failure_lines(
+        test_job,
+        [(test_line, {'action': 'log', 'test': None}), (test_line, {'subtest': 'subtest2'})],
+    )


 @pytest.fixture
 def failure_classifications(transactional_db):
     from treeherder.model.models import FailureClassification
-    for name in ["not classified", "fixed by commit", "expected fail",
-                 "intermittent", "infra", "intermittent needs filing",
-                 "autoclassified intermittent"]:
+
+    for name in [
+        "not classified",
+        "fixed by commit",
+        "expected fail",
+        "intermittent",
+        "infra",
+        "intermittent needs filing",
+        "autoclassified intermittent",
+    ]:
         FailureClassification(name=name).save()

@@ -334,14 +345,12 @@ def failure_classifications(transactional_db):
 def text_log_errors_failure_lines(test_job, failure_lines):
     from tests.autoclassify.utils import test_line, create_text_log_errors

-    lines = [(test_line, {}),
-             (test_line, {"subtest": "subtest2"})]
+    lines = [(test_line, {}), (test_line, {"subtest": "subtest2"})]

     text_log_errors = create_text_log_errors(test_job, lines)

     for error_line, failure_line in zip(text_log_errors, failure_lines):
-        TextLogErrorMetadata.objects.create(text_log_error=error_line,
-                                            failure_line=failure_line)
+        TextLogErrorMetadata.objects.create(text_log_error=error_line, failure_line=failure_line)

     return text_log_errors, failure_lines

@@ -352,8 +361,9 @@ def test_matcher(request):


 @pytest.fixture
-def classified_failures(test_job, text_log_errors_failure_lines, test_matcher,
-                        failure_classifications):
+def classified_failures(
+    test_job, text_log_errors_failure_lines, test_matcher, failure_classifications
+):
     from treeherder.model.models import ClassifiedFailure

     _, failure_lines = text_log_errors_failure_lines
@@ -375,9 +385,7 @@ def classified_failures(test_job, text_log_errors_failure_lines, test_matcher,
 @pytest.fixture
 def test_user(db):
     # a user *without* sheriff/staff permissions
-    user = User.objects.create(username="testuser1",
-                               email='user@foo.com',
-                               is_staff=False)
+    user = User.objects.create(username="testuser1", email='user@foo.com', is_staff=False)
     return user


@@ -387,45 +395,37 @@ def test_ldap_user(db):
     A user whose username matches those generated for LDAP SSO logins,
     and who does not have `is_staff` permissions.
     """
-    user = User.objects.create(username="mozilla-ldap/user@foo.com",
-                               email='user@foo.com',
-                               is_staff=False)
+    user = User.objects.create(
+        username="mozilla-ldap/user@foo.com", email='user@foo.com', is_staff=False
+    )
     return user


 @pytest.fixture
 def test_sheriff(db):
     # a user *with* sheriff/staff permissions
-    user = User.objects.create(username="testsheriff1",
-                               email='sheriff@foo.com',
-                               is_staff=True)
+    user = User.objects.create(username="testsheriff1", email='sheriff@foo.com', is_staff=True)
     return user


 @pytest.fixture
 def test_perf_framework(transactional_db):
-    return PerformanceFramework.objects.create(
-        name='test_talos', enabled=True)
+    return PerformanceFramework.objects.create(name='test_talos', enabled=True)


 @pytest.fixture
 def test_perf_signature(test_repository, test_perf_framework):
-    from treeherder.model.models import (MachinePlatform,
-                                         Option,
-                                         OptionCollection)
+    from treeherder.model.models import MachinePlatform, Option, OptionCollection

     option = Option.objects.create(name='opt')
     option_collection = OptionCollection.objects.create(
-        option_collection_hash='my_option_hash',
-        option=option)
-    platform = MachinePlatform.objects.create(
-        os_name='win',
-        platform='win7',
-        architecture='x86')
+        option_collection_hash='my_option_hash', option=option
+    )
+    platform = MachinePlatform.objects.create(os_name='win', platform='win7', architecture='x86')

     signature = PerformanceSignature.objects.create(
         repository=test_repository,
-        signature_hash=(40*'t'),
+        signature_hash=(40 * 't'),
         framework=test_perf_framework,
         platform=platform,
         option_collection=option_collection,
@@ -436,7 +436,7 @@ def test_perf_signature(test_repository, test_perf_framework):
         tags='warm pageload',
         extra_options='e10s opt',
         measurement_unit='ms',
-        last_updated=datetime.datetime.now()
+        last_updated=datetime.datetime.now(),
     )
     return signature

@@ -445,7 +445,7 @@ def test_perf_signature(test_repository, test_perf_framework):
 def test_perf_signature_2(test_perf_signature):
     return PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
-        signature_hash=(20*'t2'),
+        signature_hash=(20 * 't2'),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,
@@ -453,7 +453,7 @@ def test_perf_signature_2(test_perf_signature):
         test='mytest2',
         has_subtests=test_perf_signature.has_subtests,
         extra_options=test_perf_signature.extra_options,
-        last_updated=datetime.datetime.now()
+        last_updated=datetime.datetime.now(),
     )

@@ -476,7 +476,7 @@ def test_perf_data(test_perf_signature, eleven_jobs_stored):
         job=job,
         push=job.push,
         repository=job.repository,
-        signature=test_perf_signature
+        signature=test_perf_signature,
     )
     perf_datum.push.time = job.push.time
     perf_datum.push.save()
@@ -496,17 +496,11 @@ def mock_bugzilla_api_request(monkeypatch):

     def _fetch_json(url, params=None):
         tests_folder = os.path.dirname(__file__)
-        bug_list_path = os.path.join(
-            tests_folder,
-            "sample_data",
-            "bug_list.json"
-        )
+        bug_list_path = os.path.join(tests_folder, "sample_data", "bug_list.json")
        with open(bug_list_path) as f:
             return json.load(f)

-    monkeypatch.setattr(treeherder.etl.bugzilla,
-                        'fetch_json',
-                        _fetch_json)
+    monkeypatch.setattr(treeherder.etl.bugzilla, 'fetch_json', _fetch_json)


 @pytest.fixture
@@ -555,7 +549,8 @@ def test_perf_alert_summary(test_repository, push_stored, test_perf_framework, t
         prev_push_id=1,
         push_id=2,
         manually_created=False,
-        created=datetime.datetime.now())
+        created=datetime.datetime.now(),
+    )


 @pytest.fixture
@@ -563,14 +558,17 @@ def test_perf_alert_summary_2(test_perf_alert_summary):
     return PerformanceAlertSummary.objects.create(
         repository=test_perf_alert_summary.repository,
         framework=test_perf_alert_summary.framework,
-        prev_push_id=test_perf_alert_summary.prev_push_id+1,
-        push_id=test_perf_alert_summary.push_id+1,
+        prev_push_id=test_perf_alert_summary.prev_push_id + 1,
+        push_id=test_perf_alert_summary.push_id + 1,
         manually_created=False,
-        created=datetime.datetime.now())
+        created=datetime.datetime.now(),
+    )


 @pytest.fixture
-def test_perf_alert_summary_with_bug(test_repository, push_stored, test_perf_framework, test_issue_tracker):
+def test_perf_alert_summary_with_bug(
+    test_repository, push_stored, test_perf_framework, test_issue_tracker
+):
     return PerformanceAlertSummary.objects.create(
         repository=test_repository,
         framework=test_perf_framework,
@@ -579,7 +577,8 @@ def test_perf_alert_summary_with_bug(test_repository, push_stored, test_perf_fra
         manually_created=False,
         created=datetime.datetime.now(),
         bug_number=123456,
-        bug_updated=datetime.datetime.now())
+        bug_updated=datetime.datetime.now(),
+    )


 @pytest.fixture
@@ -592,7 +591,8 @@ def test_perf_alert(test_perf_signature, test_perf_alert_summary):
         amount_abs=50.0,
         prev_value=100.0,
         new_value=150.0,
-        t_value=20.0)
+        t_value=20.0,
+    )


 @pytest.fixture
@@ -605,7 +605,8 @@ def test_conflicting_perf_alert(test_perf_signature, test_perf_alert_summary_2):
         amount_abs=50.0,
         prev_value=100.0,
         new_value=150.0,
-        t_value=20.0)
+        t_value=20.0,
+    )


 @pytest.fixture
@@ -618,7 +619,8 @@ def test_perf_alert_2(test_perf_alert, test_perf_signature_2, test_perf_alert_su
         amount_abs=50.0,
         prev_value=100.0,
         new_value=150.0,
-        t_value=20.0)
+        t_value=20.0,
+    )


 @pytest.fixture
@@ -626,33 +628,34 @@ def generic_reference_data(test_repository):
     '''
     Generic reference data (if you want to create a bunch of mock jobs)
     '''
-    from treeherder.model.models import (BuildPlatform,
-                                         JobGroup,
-                                         JobType,
-                                         Machine,
-                                         MachinePlatform,
-                                         Option,
-                                         OptionCollection,
-                                         Product,
-                                         ReferenceDataSignatures)
+    from treeherder.model.models import (
+        BuildPlatform,
+        JobGroup,
+        JobType,
+        Machine,
+        MachinePlatform,
+        Option,
+        OptionCollection,
+        Product,
+        ReferenceDataSignatures,
+    )

     class RefdataHolder:
         pass

     r = RefdataHolder()

     r.option = Option.objects.create(name='my_option')
     r.option_collection = OptionCollection.objects.create(
-        option_collection_hash='my_option_hash',
-        option=r.option)
+        option_collection_hash='my_option_hash', option=r.option
+    )
     r.option_collection_hash = r.option_collection.option_collection_hash
     r.machine_platform = MachinePlatform.objects.create(
-        os_name="my_os",
-        platform="my_platform",
-        architecture="x86")
+        os_name="my_os", platform="my_platform", architecture="x86"
+    )
     r.build_platform = BuildPlatform.objects.create(
-        os_name="my_os",
-        platform="my_platform",
-        architecture="x86")
+        os_name="my_os", platform="my_platform", architecture="x86"
+    )
     r.machine = Machine.objects.create(name='mymachine')
     r.job_group = JobGroup.objects.create(symbol='S', name='myjobgroup')
     r.job_type = JobType.objects.create(symbol='j', name='myjob')
@@ -673,22 +676,21 @@ def generic_reference_data(test_repository):
         option_collection_hash=r.option_collection_hash,
         build_system_type='buildbot',
         repository=test_repository.name,
-        first_submission_timestamp=0)
+        first_submission_timestamp=0,
+    )

     return r


 @pytest.fixture
 def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
-    from treeherder.model.models import (Job,
-                                         BugJobMap,
-                                         Option)
+    from treeherder.model.models import Job, BugJobMap, Option

     jobs = Job.objects.all()
     bug_id = bugs[0].id
     job_id = jobs[0].id
     BugJobMap.create(job_id=job_id, bug_id=bug_id)
-    query_string = '?startday=2012-05-09&endday=2018-05-10&tree={}'.format(
-        test_repository.name)
+    query_string = '?startday=2012-05-09&endday=2018-05-10&tree={}'.format(test_repository.name)

     return {
         'tree': test_repository.name,
@@ -696,7 +698,7 @@ def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
         'bug_id': bug_id,
         'job': jobs[0],
         'jobs': jobs,
-        'query_string': query_string
+        'query_string': query_string,
     }

@@ -709,10 +711,7 @@ def test_run_data(bug_data):
         if push.time.strftime('%Y-%m-%d') == time:
             test_runs += 1

-    return {
-        'test_runs': test_runs,
-        'push_time': time
-    }
+    return {'test_runs': test_runs, 'push_time': time}


 @pytest.fixture
@@ -724,67 +723,67 @@ def generate_enough_perf_datum(test_repository, test_perf_signature):
     for (push_id, value) in zip([1] * 30 + [2] * 30, [1] * 30 + [2] * 30):
         # push_id == result_set_id == timestamp for purposes of this test
         push = Push.objects.get(id=push_id)
-        PerformanceDatum.objects.create(repository=test_repository,
-                                        result_set_id=push_id,
-                                        push_id=push_id,
-                                        signature=test_perf_signature,
-                                        value=value,
-                                        push_timestamp=push.time)
+        PerformanceDatum.objects.create(
+            repository=test_repository,
+            result_set_id=push_id,
+            push_id=push_id,
+            signature=test_perf_signature,
+            value=value,
+            push_timestamp=push.time,
+        )


 @pytest.fixture
 def sample_option_collections(transactional_db):
     option1 = Option.objects.create(name='opt1')
     option2 = Option.objects.create(name='opt2')
-    OptionCollection.objects.create(
-        option_collection_hash='option_hash1',
-        option=option1)
-    OptionCollection.objects.create(
-        option_collection_hash='option_hash2',
-        option=option2)
+    OptionCollection.objects.create(option_collection_hash='option_hash1', option=option1)
+    OptionCollection.objects.create(option_collection_hash='option_hash2', option=option2)


 @pytest.fixture
 def backfill_record_context():
-    return {"data_points_to_retrigger": [
-        {
-            "perf_datum_id": 933219901,
-            "value": 0.8714208119774209,
-            "job_id": 269034923,
-            "push_id": 565159,
-            "push_timestamp": "2019-10-02 02:22:28",
-            "push__revision": "04e8766a29242d4deae31b5b04e6ac61ebf61ffd"
-        },
-        {
-            "perf_datum_id": 933219962,
-            "value": 0.9160434865973892,
-            "job_id": 269034920,
-            "push_id": 565160,
-            "push_timestamp": "2019-10-02 02:23:29",
-            "push__revision": "9b42bdc4889fe7782df9b2a0aa990ed5e62cb04c"
-        },
-        {
-            "perf_datum_id": 931772364,
-            "value": 0.9508247997807697,
-            "job_id": 268828343,
-            "push_id": 565161,
-            "push_timestamp": "2019-10-02 02:24:35",
-            "push__revision": "057b59fdadad75e888a739e85a683b2ff7bfc62e"
-        },
-        {
-            "perf_datum_id": 931924904,
-            "value": 0.9829230628232519,
-            "job_id": 268840223,
-            "push_id": 565188,
-            "push_timestamp": "2019-10-02 04:03:09",
-            "push__revision": "49ef9afb62bb909389b105a1751e9b46e6f1688d"
-        },
-        {
-            "perf_datum_id": 931927300,
-            "value": 0.9873498499464002,
-            "job_id": 268840309,
-            "push_id": 565193,
-            "push_timestamp": "2019-10-02 04:08:06",
-            "push__revision": "f5cce52461bac31945b083e51a085fb429a36f04"
-        }
-    ]}
+    return {
+        "data_points_to_retrigger": [
+            {
+                "perf_datum_id": 933219901,
+                "value": 0.8714208119774209,
+                "job_id": 269034923,
+                "push_id": 565159,
+                "push_timestamp": "2019-10-02 02:22:28",
+                "push__revision": "04e8766a29242d4deae31b5b04e6ac61ebf61ffd",
+            },
+            {
+                "perf_datum_id": 933219962,
+                "value": 0.9160434865973892,
+                "job_id": 269034920,
+                "push_id": 565160,
+                "push_timestamp": "2019-10-02 02:23:29",
+                "push__revision": "9b42bdc4889fe7782df9b2a0aa990ed5e62cb04c",
+            },
+            {
+                "perf_datum_id": 931772364,
+                "value": 0.9508247997807697,
+                "job_id": 268828343,
+                "push_id": 565161,
+                "push_timestamp": "2019-10-02 02:24:35",
+                "push__revision": "057b59fdadad75e888a739e85a683b2ff7bfc62e",
+            },
+            {
+                "perf_datum_id": 931924904,
+                "value": 0.9829230628232519,
+                "job_id": 268840223,
+                "push_id": 565188,
+                "push_timestamp": "2019-10-02 04:03:09",
+                "push__revision": "49ef9afb62bb909389b105a1751e9b46e6f1688d",
+            },
+            {
+                "perf_datum_id": 931927300,
+                "value": 0.9873498499464002,
+                "job_id": 268840309,
+                "push_id": 565193,
+                "push_timestamp": "2019-10-02 04:08:06",
+                "push__revision": "f5cce52461bac31945b083e51a085fb429a36f04",
+            },
+        ]
+    }

@@ -30,9 +30,7 @@ def completed_job():


 @pytest.fixture
-def pending_jobs_stored(
-        test_repository, failure_classifications, pending_job,
-        push_stored):
+def pending_jobs_stored(test_repository, failure_classifications, pending_job, push_stored):
     """
     stores a list of buildapi pending jobs into the jobs store
     """
@@ -42,9 +40,7 @@ def pending_jobs_stored(


 @pytest.fixture
-def running_jobs_stored(
-        test_repository, failure_classifications, running_job,
-        push_stored):
+def running_jobs_stored(test_repository, failure_classifications, running_job, push_stored):
     """
     stores a list of buildapi running jobs
     """
@@ -54,9 +50,7 @@ def running_jobs_stored(


 @pytest.fixture
-def completed_jobs_stored(
-        test_repository, failure_classifications, completed_job,
-        push_stored):
+def completed_jobs_stored(test_repository, failure_classifications, completed_job, push_stored):
     """
     stores a list of buildapi completed jobs
     """

@@ -9,11 +9,7 @@ from tests.test_utils import add_log_response
 from treeherder.etl.jobs import store_job_data
 from treeherder.log_parser.parsers import StepParser
 from treeherder.model.error_summary import get_error_summary
-from treeherder.model.models import (Job,
-                                     JobDetail,
-                                     JobLog,
-                                     TextLogError,
-                                     TextLogStep)
+from treeherder.model.models import Job, JobDetail, JobLog, TextLogError, TextLogStep

 # TODO: Turn these into end to end taskcluster tests as part of removing buildbot
 # support in bug 1443251, or else delete them if they're duplicating coverage.
@@ -24,25 +20,41 @@ def text_log_summary_dict():
     return {
         "step_data": {
             "steps": [
-                {"name": "Clone gecko tc-vcs ",
-                 "started_linenumber": 1,
-                 "finished_linenumber": 100000,
-                 "started": "2016-07-13 16:09:31",
-                 "finished": "2016-07-13 16:09:31",
-                 "result": "testfailed",
-                 "errors": [
-                     {"line": "12:34:13 INFO - Assertion failure: addr % CellSize == 0, at ../../../js/src/gc/Heap.h:1041", "linenumber": 61918},
-                     {"line": "12:34:24 WARNING - TEST-UNEXPECTED-FAIL | file:///builds/slave/talos-slave/test/build/tests/jsreftest/tests/jsreftest.html?test=ecma_5/JSON/parse-array-gc.js | Exited with code 1 during test run", "linenumber": 61919},
-                     {"line": "12:34:37 WARNING - PROCESS-CRASH | file:///builds/slave/talos-slave/test/build/tests/jsreftest/tests/jsreftest.html?test=ecma_5/JSON/parse-array-gc.js | application crashed [@ js::gc::Cell::tenuredZone() const]", "linenumber": 61922},
-                     {"line": "12:34:38 ERROR - Return code: 256", "linenumber": 64435}
-                 ]},
-                {"name": "Build ./build-b2g-desktop.sh /home/worker/workspace", "started_linenumber": 1, "finished_linenumber": 1, "result": "success",
-                 "started": "2016-07-13 16:09:31",
-                 "finished": "2016-07-13 16:09:31"}
+                {
+                    "name": "Clone gecko tc-vcs ",
+                    "started_linenumber": 1,
+                    "finished_linenumber": 100000,
+                    "started": "2016-07-13 16:09:31",
+                    "finished": "2016-07-13 16:09:31",
+                    "result": "testfailed",
+                    "errors": [
+                        {
+                            "line": "12:34:13 INFO - Assertion failure: addr % CellSize == 0, at ../../../js/src/gc/Heap.h:1041",
+                            "linenumber": 61918,
+                        },
+                        {
+                            "line": "12:34:24 WARNING - TEST-UNEXPECTED-FAIL | file:///builds/slave/talos-slave/test/build/tests/jsreftest/tests/jsreftest.html?test=ecma_5/JSON/parse-array-gc.js | Exited with code 1 during test run",
+                            "linenumber": 61919,
+                        },
+                        {
+                            "line": "12:34:37 WARNING - PROCESS-CRASH | file:///builds/slave/talos-slave/test/build/tests/jsreftest/tests/jsreftest.html?test=ecma_5/JSON/parse-array-gc.js | application crashed [@ js::gc::Cell::tenuredZone() const]",
+                            "linenumber": 61922,
+                        },
+                        {"line": "12:34:38 ERROR - Return code: 256", "linenumber": 64435},
+                    ],
+                },
+                {
+                    "name": "Build ./build-b2g-desktop.sh /home/worker/workspace",
+                    "started_linenumber": 1,
+                    "finished_linenumber": 1,
+                    "result": "success",
+                    "started": "2016-07-13 16:09:31",
+                    "finished": "2016-07-13 16:09:31",
+                },
             ],
-            "errors_truncated": False
+            "errors_truncated": False,
         },
-        "logurl": "https://queue.taskcluster.net/v1/task/nhxC4hC3RE6LSVWTZT4rag/runs/0/artifacts/public/logs/live_backing.log"
+        "logurl": "https://queue.taskcluster.net/v1/task/nhxC4hC3RE6LSVWTZT4rag/runs/0/artifacts/public/logs/live_backing.log",
     }

@@ -52,8 +64,9 @@ def check_job_log(test_repository, job_guid, parse_status):
     assert job_logs[0].status == parse_status


-def test_store_job_with_unparsed_log(test_repository, failure_classifications,
-                                     push_stored, monkeypatch, activate_responses):
+def test_store_job_with_unparsed_log(
+    test_repository, failure_classifications, push_stored, monkeypatch, activate_responses
+):
     """
     test submitting a job with an unparsed log parses the log,
     generates an appropriate set of text log steps, and calls
@@ -62,11 +75,10 @@ def test_store_job_with_unparsed_log(test_repository, failure_classifications,

     # create a wrapper around get_error_summary that records whether
     # it's been called
-    mock_get_error_summary = MagicMock(name='get_error_summary',
-                                       wraps=get_error_summary)
+    mock_get_error_summary = MagicMock(name='get_error_summary', wraps=get_error_summary)
     import treeherder.model.error_summary
-    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
-                        mock_get_error_summary)
+
+    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary', mock_get_error_summary)
     log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

     job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
@@ -76,12 +88,10 @@ def test_store_job_with_unparsed_log(test_repository, failure_classifications,
         'job': {
             'job_guid': job_guid,
             'state': 'completed',
-            'log_references': [{
-                'url': log_url,
-                'name': 'buildbot_text',
-                'parse_status': 'pending'
-            }]
-        }
+            'log_references': [
+                {'url': log_url, 'name': 'buildbot_text', 'parse_status': 'pending'}
+            ],
+        },
     }
     store_job_data(test_repository, [job_data])

@ -94,9 +104,9 @@ def test_store_job_with_unparsed_log(test_repository, failure_classifications,
|
|||
assert len(get_error_summary(Job.objects.get(id=1))) == 2
|
||||
|
||||
|
||||
def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_stored,
|
||||
failure_classifications,
|
||||
activate_responses):
|
||||
def test_store_job_pending_to_completed_with_unparsed_log(
|
||||
test_repository, push_stored, failure_classifications, activate_responses
|
||||
):
|
||||
|
||||
job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
|
||||
|
||||
|
@ -104,10 +114,7 @@ def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_
job_data = {
'project': test_repository.name,
'revision': push_stored[0]['revision'],
'job': {
'job_guid': job_guid,
'state': 'running'
}
'job': {'job_guid': job_guid, 'state': 'running'},
}
store_job_data(test_repository, [job_data])
# should have no text log errors or bug suggestions

@ -122,12 +129,10 @@ def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_
'job': {
'job_guid': job_guid,
'state': 'completed',
'log_references': [{
'url': log_url,
'name': 'buildbot_text',
'parse_status': 'pending'
}]
}
'log_references': [
{'url': log_url, 'name': 'buildbot_text', 'parse_status': 'pending'}
],
},
}
store_job_data(test_repository, [job_data])

@ -136,9 +141,9 @@ def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_
assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2


def test_store_job_with_parsed_log(test_repository, push_stored,
failure_classifications,
monkeypatch):
def test_store_job_with_parsed_log(
test_repository, push_stored, failure_classifications, monkeypatch
):
"""
test submitting a job with a pre-parsed log gets job_log_url
parse_status of "parsed" and does not parse, even though no text_log_summary
@ -157,12 +162,14 @@ def test_store_job_with_parsed_log(test_repository, push_stored,
'job': {
'job_guid': job_guid,
'state': 'completed',
'log_references': [{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'parsed'
}]
}
'log_references': [
{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'parsed',
}
],
},
}

store_job_data(test_repository, [job_data])

@ -172,12 +179,8 @@ def test_store_job_with_parsed_log(test_repository, push_stored,


def test_store_job_with_text_log_summary_artifact_parsed(
test_repository,
failure_classifications,
push_stored,
monkeypatch,
text_log_summary_dict,
):
test_repository, failure_classifications, push_stored, monkeypatch, text_log_summary_dict,
):
"""
test submitting a job with a pre-parsed log gets parse_status of
"parsed" and doesn't parse the log, but we get the expected set of
@ -194,18 +197,22 @@ def test_store_job_with_text_log_summary_artifact_parsed(
'job': {
'job_guid': job_guid,
'state': 'completed',
'log_references': [{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'parsed'
}],
'artifacts': [{
"blob": json.dumps(text_log_summary_dict),
"type": "json",
"name": "text_log_summary",
"job_guid": job_guid
}]
}
'log_references': [
{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'parsed',
}
],
'artifacts': [
{
"blob": json.dumps(text_log_summary_dict),
"type": "json",
"name": "text_log_summary",
"job_guid": job_guid,
}
],
},
}

store_job_data(test_repository, [job_data])

@ -218,12 +225,8 @@ def test_store_job_with_text_log_summary_artifact_parsed(


def test_store_job_with_text_log_summary_artifact_pending(
test_repository,
failure_classifications,
push_stored,
monkeypatch,
text_log_summary_dict,
):
test_repository, failure_classifications, push_stored, monkeypatch, text_log_summary_dict,
):
"""
test submitting a job with a log set to pending, but with a text_log_summary.
@ -241,18 +244,22 @@ def test_store_job_with_text_log_summary_artifact_pending(
'job': {
'job_guid': job_guid,
'state': 'completed',
'log_references': [{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'pending'
}],
'artifacts': [{
"blob": json.dumps(text_log_summary_dict),
"type": "json",
"name": "text_log_summary",
"job_guid": job_guid
}]
}
'log_references': [
{
'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
'name': 'buildbot_text',
'parse_status': 'pending',
}
],
'artifacts': [
{
"blob": json.dumps(text_log_summary_dict),
"type": "json",
"name": "text_log_summary",
"job_guid": job_guid,
}
],
},
}

store_job_data(test_repository, [job_data])

@ -265,11 +272,8 @@ def test_store_job_with_text_log_summary_artifact_pending(


def test_store_job_artifacts_by_add_artifact(
test_repository,
failure_classifications,
push_stored,
monkeypatch,
):
test_repository, failure_classifications, push_stored, monkeypatch,
):
"""
test submitting a job with artifacts added by ``add_artifact``
@ -283,25 +287,31 @@ def test_store_job_artifacts_by_add_artifact(
mock_parse = MagicMock(name="parse_line")
monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

tls_blob = json.dumps({
"logurl": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log",
"step_data": {
"steps": [{
"name": "foobar",
"result": "testfailed",
"started_linenumber": 1,
"finished_linenumber": 100000,
"started": "2016-07-13 16:09:31",
"finished": "2016-07-13 16:09:31",
"errors": [
{"line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.", "linenumber": 64435}
tls_blob = json.dumps(
{
"logurl": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log",
"step_data": {
"steps": [
{
"name": "foobar",
"result": "testfailed",
"started_linenumber": 1,
"finished_linenumber": 100000,
"started": "2016-07-13 16:09:31",
"finished": "2016-07-13 16:09:31",
"errors": [
{
"line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.",
"linenumber": 64435,
}
],
}
]
}]
},
}
})
)

ji_blob = json.dumps({"job_details": [{"title": "mytitle",
"value": "myvalue"}]})
ji_blob = json.dumps({"job_details": [{"title": "mytitle", "value": "myvalue"}]})
pb_blob = json.dumps({"build_url": "feh", "chunk": 1, "config_file": "mah"})

job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
@ -316,25 +326,15 @@ def test_store_job_artifacts_by_add_artifact(
'blob': tls_blob,
'job_guid': job_guid,
},
{
'name': 'Job Info',
'type': 'json',
'blob': ji_blob,
'job_guid': job_guid,
},
{
'name': 'privatebuild',
'type': 'json',
'blob': pb_blob,
'job_guid': job_guid,
},
{'name': 'Job Info', 'type': 'json', 'blob': ji_blob, 'job_guid': job_guid,},
{'name': 'privatebuild', 'type': 'json', 'blob': pb_blob, 'job_guid': job_guid,},
],
"job_guid": job_guid,
"log_references": [
{
"name": "autophone-nexus-one-1.log",
"parse_status": "parsed",
"url": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log"
"url": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log",
}
],
"state": "completed",
@ -349,7 +349,7 @@ def test_store_job_artifacts_by_add_artifact(
'job': 1,
'title': 'mytitle',
'value': 'myvalue',
'url': None
'url': None,
}

assert TextLogStep.objects.count() == 1

@ -361,7 +361,7 @@ def test_store_job_artifacts_by_add_artifact(
'name': 'foobar',
'result': 1,
'started_line_number': 1,
'finished_line_number': 100000
'finished_line_number': 100000,
}

assert TextLogError.objects.count() == 1
@ -388,11 +388,7 @@ def test_store_job_with_tier(test_repository, failure_classifications, push_stor
job_data = {
'project': test_repository.name,
'revision': push_stored[0]['revision'],
'job': {
'job_guid': job_guid,
'state': 'completed',
'tier': 3,
}
'job': {'job_guid': job_guid, 'state': 'completed', 'tier': 3,},
}

store_job_data(test_repository, [job_data])

@ -407,10 +403,7 @@ def test_store_job_with_default_tier(test_repository, failure_classifications, p
job_data = {
'project': test_repository.name,
'revision': push_stored[0]['revision'],
'job': {
'job_guid': job_guid,
'state': 'completed',
}
'job': {'job_guid': job_guid, 'state': 'completed',},
}

store_job_data(test_repository, [job_data])

@ -2,9 +2,7 @@ from django.urls import reverse


def test_pending_job_available(test_repository, pending_jobs_stored, client):
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()

@ -14,9 +12,7 @@ def test_pending_job_available(test_repository, pending_jobs_stored, client):


def test_running_job_available(test_repository, running_jobs_stored, client):
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()

@ -26,9 +22,7 @@ def test_running_job_available(test_repository, running_jobs_stored, client):


def test_completed_job_available(test_repository, completed_jobs_stored, client):
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()
@ -36,16 +30,15 @@ def test_completed_job_available(test_repository, completed_jobs_stored, client)
assert jobs['results'][0]['state'] == 'completed'


def test_pending_stored_to_running_loaded(test_repository, pending_jobs_stored,
running_jobs_stored, client):
def test_pending_stored_to_running_loaded(
test_repository, pending_jobs_stored, running_jobs_stored, client
):
"""
tests a job transition from pending to running
given a loaded pending job, if I store and load the same job with status running,
the latter is shown in the jobs endpoint
"""
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()

@ -53,14 +46,13 @@ def test_pending_stored_to_running_loaded(test_repository, pending_jobs_stored,
assert jobs['results'][0]['state'] == 'running'


def test_finished_job_to_running(test_repository, completed_jobs_stored,
running_jobs_stored, client):
def test_finished_job_to_running(
test_repository, completed_jobs_stored, running_jobs_stored, client
):
"""
tests that a job finished cannot change state
"""
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()

@ -68,15 +60,12 @@ def test_finished_job_to_running(test_repository, completed_jobs_stored,
assert jobs['results'][0]['state'] == 'completed'


def test_running_job_to_pending(test_repository, running_jobs_stored,
pending_jobs_stored, client):
def test_running_job_to_pending(test_repository, running_jobs_stored, pending_jobs_stored, client):
"""
tests that a job transition from pending to running
cannot happen
"""
resp = client.get(
reverse("jobs-list", kwargs={"project": test_repository.name})
)
resp = client.get(reverse("jobs-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
jobs = resp.json()

@ -1,9 +1,7 @@
import copy

from treeherder.etl.jobs import store_job_data
from treeherder.perf.models import (PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.perf.models import PerformanceDatum, PerformanceFramework, PerformanceSignature

# TODO: Turn these into end to end taskcluster tests as part of removing buildbot
# support in bug 1443251, or else delete them if they're duplicating coverage.

@ -21,25 +19,29 @@ def test_store_perf_artifact(test_repository, failure_classifications, push_stor
'state': 'completed',
'project': test_repository.name,
'option_collection': {'opt': True},
'artifacts': [{
'blob': {
"performance_data": {
"framework": {"name": "cheezburger"},
"suites": [{
"name": "cheezburger metrics",
"value": 10.0,
"subtests": [
{"name": "test1", "value": 20.0},
{"name": "test2", "value": 30.0}
]
}]
}
},
'type': 'json',
'name': 'performance_data',
'job_guid': job_guid
}]
}
'artifacts': [
{
'blob': {
"performance_data": {
"framework": {"name": "cheezburger"},
"suites": [
{
"name": "cheezburger metrics",
"value": 10.0,
"subtests": [
{"name": "test1", "value": 20.0},
{"name": "test2", "value": 30.0},
],
}
],
}
},
'type': 'json',
'name': 'performance_data',
'job_guid': job_guid,
}
],
},
}

store_job_data(test_repository, [job_data])
@ -54,14 +56,13 @@ def test_store_perf_artifact_multiple(test_repository, failure_classifications,
PerformanceFramework.objects.get_or_create(name='cheezburger', enabled=True)
perfobj = {
"framework": {"name": "cheezburger"},
"suites": [{
"name": "cheezburger metrics",
"value": 10.0,
"subtests": [
{"name": "test1", "value": 20.0},
{"name": "test2", "value": 30.0}
]
}]
"suites": [
{
"name": "cheezburger metrics",
"value": 10.0,
"subtests": [{"name": "test1", "value": 20.0}, {"name": "test2", "value": 30.0}],
}
],
}
perfobj2 = copy.deepcopy(perfobj)
perfobj2['suites'][0]['name'] = "cheezburger metrics 2"

@ -74,15 +75,15 @@ def test_store_perf_artifact_multiple(test_repository, failure_classifications,
'state': 'completed',
'project': test_repository.name,
'option_collection': {'opt': True},
'artifacts': [{
'blob': {
"performance_data": [perfobj, perfobj2]
},
'type': 'json',
'name': 'performance_data',
'job_guid': job_guid
}]
}
'artifacts': [
{
'blob': {"performance_data": [perfobj, perfobj2]},
'type': 'json',
'name': 'performance_data',
'job_guid': job_guid,
}
],
},
}

store_job_data(test_repository, [job_data])

@ -12,10 +12,12 @@ def perf_push(test_repository):
repository=test_repository,
revision='1234abcd',
author='foo@bar.com',
time=datetime.datetime.now())
time=datetime.datetime.now(),
)


@pytest.fixture
def perf_job(perf_push, failure_classifications, generic_reference_data):
return create_generic_job('myfunguid', perf_push.repository,
perf_push.id, generic_reference_data)
return create_generic_job(
'myfunguid', perf_push.repository, perf_push.id, generic_reference_data
)

@ -4,16 +4,14 @@ import pytest

from tests import test_utils
from tests.sample_data_generator import job_data
from treeherder.etl.jobs import (_remove_existing_jobs,
store_job_data)
from treeherder.etl.jobs import _remove_existing_jobs, store_job_data
from treeherder.etl.push import store_push_data
from treeherder.model.models import (Job,
JobLog)
from treeherder.model.models import Job, JobLog


def test_ingest_single_sample_job(test_repository, failure_classifications,
sample_data, sample_push,
mock_log_parser):
def test_ingest_single_sample_job(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Process a single job structure in the job_data.txt file"""
job_data = sample_data.job_data[:1]
test_utils.do_job_ingestion(test_repository, job_data, sample_push)

@ -24,8 +22,9 @@ def test_ingest_single_sample_job(test_repository, failure_classifications,
assert job.signature.signature == '4dabe44cc898e585228c43ea21337a9b00f5ddf7'


def test_ingest_all_sample_jobs(test_repository, failure_classifications,
sample_data, sample_push, mock_log_parser):
def test_ingest_all_sample_jobs(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""
Process each job structure in the job_data.txt file and verify.
"""

@ -33,11 +32,9 @@ def test_ingest_all_sample_jobs(test_repository, failure_classifications,
test_utils.do_job_ingestion(test_repository, job_data, sample_push)


def test_ingest_twice_log_parsing_status_changed(test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser):
def test_ingest_twice_log_parsing_status_changed(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Process a single job twice, but change the log parsing status between,
verify that nothing changes"""
job_data = sample_data.job_data[:1]
@ -56,12 +53,14 @@ def test_ingest_twice_log_parsing_status_changed(test_repository,


@pytest.mark.parametrize("same_ingestion_cycle", [False, True])
def test_ingest_running_to_retry_sample_job(test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser,
same_ingestion_cycle):
def test_ingest_running_to_retry_sample_job(
test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser,
same_ingestion_cycle,
):
"""Process a single job structure in the job_data.txt file"""
store_push_data(test_repository, sample_push)

@ -101,15 +100,17 @@ def test_ingest_running_to_retry_sample_job(test_repository,
assert job.guid == job_data[-1]['job']['job_guid']


@pytest.mark.parametrize("ingestion_cycles", [[(0, 1), (1, 2), (2, 3)],
[(0, 2), (2, 3)],
[(0, 3)], [(0, 1), (1, 3)]])
def test_ingest_running_to_retry_to_success_sample_job(test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser,
ingestion_cycles):
@pytest.mark.parametrize(
"ingestion_cycles", [[(0, 1), (1, 2), (2, 3)], [(0, 2), (2, 3)], [(0, 3)], [(0, 1), (1, 3)]]
)
def test_ingest_running_to_retry_to_success_sample_job(
test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser,
ingestion_cycles,
):
# verifies that retries to success work, no matter how jobs are batched
store_push_data(test_repository, sample_push)
@ -121,10 +122,10 @@ def test_ingest_running_to_retry_to_success_sample_job(test_repository,

job_data = []
for (state, result, job_guid) in [
('running', 'unknown', job_guid_root),
('completed', 'retry',
job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
('completed', 'success', job_guid_root)]:
('running', 'unknown', job_guid_root),
('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
('completed', 'success', job_guid_root),
]:
new_job_datum = copy.deepcopy(job_datum)
new_job_datum['job']['state'] = state
new_job_datum['job']['result'] = result

@ -140,12 +141,17 @@ def test_ingest_running_to_retry_to_success_sample_job(test_repository,
assert JobLog.objects.count() == 2


@pytest.mark.parametrize("ingestion_cycles", [[(0, 1), (1, 3), (3, 4)],
[(0, 3), (3, 4)],
[(0, 2), (2, 4)]])
@pytest.mark.parametrize(
"ingestion_cycles", [[(0, 1), (1, 3), (3, 4)], [(0, 3), (3, 4)], [(0, 2), (2, 4)]]
)
def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
test_repository, failure_classifications, sample_data, sample_push,
mock_log_parser, ingestion_cycles):
test_repository,
failure_classifications,
sample_data,
sample_push,
mock_log_parser,
ingestion_cycles,
):
# this verifies that if we ingest multiple retries:
# (1) nothing errors out
# (2) we end up with three jobs (the original + 2 retry jobs)
@ -160,12 +166,11 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(

job_data = []
for (state, result, job_guid) in [
('running', 'unknown', job_guid_root),
('completed', 'retry',
job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
('completed', 'retry',
job_guid_root + "_12345"),
('completed', 'success', job_guid_root)]:
('running', 'unknown', job_guid_root),
('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
('completed', 'retry', job_guid_root + "_12345"),
('completed', 'success', job_guid_root),
]:
new_job_datum = copy.deepcopy(job_datum)
new_job_datum['job']['state'] = state
new_job_datum['job']['result'] = result

@ -183,10 +188,9 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
assert JobLog.objects.count() == 3


def test_ingest_retry_sample_job_no_running(test_repository,
failure_classifications,
sample_data, sample_push,
mock_log_parser):
def test_ingest_retry_sample_job_no_running(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Process a single job structure in the job_data.txt file"""
job_data = copy.deepcopy(sample_data.job_data[:1])
job = job_data[0]['job']
@ -209,23 +213,23 @@ def test_ingest_retry_sample_job_no_running(test_repository,
assert job.guid == retry_guid


def test_bad_date_value_ingestion(test_repository, failure_classifications,
sample_push, mock_log_parser):
def test_bad_date_value_ingestion(
test_repository, failure_classifications, sample_push, mock_log_parser
):
"""
Test ingesting a job blob with bad date value

"""
blob = job_data(start_timestamp="foo",
revision=sample_push[0]['revision'])
blob = job_data(start_timestamp="foo", revision=sample_push[0]['revision'])

store_push_data(test_repository, sample_push[:1])
store_job_data(test_repository, [blob])
# if no exception, we are good.


def test_remove_existing_jobs_single_existing(test_repository, failure_classifications,
sample_data, sample_push,
mock_log_parser):
def test_remove_existing_jobs_single_existing(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Remove single existing job prior to loading"""

job_data = sample_data.job_data[:1]

@ -236,10 +240,9 @@ def test_remove_existing_jobs_single_existing(test_repository, failure_classific
assert data == []


def test_remove_existing_jobs_one_existing_one_new(test_repository, failure_classifications,
sample_data,
sample_push,
mock_log_parser):
def test_remove_existing_jobs_one_existing_one_new(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Remove single existing job prior to loading"""

job_data = sample_data.job_data[:1]
@ -251,8 +254,9 @@ def test_remove_existing_jobs_one_existing_one_new(test_repository, failure_clas
assert Job.objects.count() == 1


def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
failure_classifications, mock_log_parser):
def test_ingest_job_default_tier(
test_repository, sample_data, sample_push, failure_classifications, mock_log_parser
):
"""Tier is set to 1 by default"""
job_data = sample_data.job_data[:1]
store_push_data(test_repository, sample_push)

@ -261,8 +265,9 @@ def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
assert job.tier == 1


def test_ingesting_skip_existing(test_repository, failure_classifications, sample_data,
sample_push, mock_log_parser):
def test_ingesting_skip_existing(
test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
"""Remove single existing job prior to loading"""
job_data = sample_data.job_data[:1]
test_utils.do_job_ingestion(test_repository, job_data, sample_push)

@ -272,9 +277,9 @@ def test_ingesting_skip_existing(test_repository, failure_classifications, sampl
assert Job.objects.count() == 2


def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
sample_data, mock_log_parser,
push_stored):
def test_ingest_job_with_updated_job_group(
test_repository, failure_classifications, sample_data, mock_log_parser, push_stored
):
"""
The job_type and job_group for a job is independent of any other job_type
and job_group combination.

@ -8,10 +8,7 @@ import slugid
from treeherder.etl.exceptions import MissingPushException
from treeherder.etl.job_loader import JobLoader
from treeherder.etl.taskcluster_pulse.handler import handleMessage
from treeherder.model.models import (Job,
JobDetail,
JobLog,
TaskclusterMetadata)
from treeherder.model.models import Job, JobDetail, JobLog, TaskclusterMetadata


@pytest.fixture

@ -47,7 +44,8 @@ def mock_artifact(taskId, runId, artifactName):
baseUrl.format(taskId=taskId, runId=runId, artifactName=artifactName),
body="",
content_type='text/plain',
status=200)
status=200,
)


@pytest.fixture

@ -84,6 +82,7 @@ def new_transformed_jobs(sample_data, test_repository, push_stored):

def test_job_transformation(pulse_jobs, transformed_pulse_jobs):
import json

jl = JobLoader()
for idx, pulse_job in enumerate(pulse_jobs):
assert jl._is_valid_job(pulse_job)

@ -105,8 +104,9 @@ def test_new_job_transformation(new_pulse_jobs, new_transformed_jobs, failure_cl
assert new_transformed_jobs[taskId] == transformed_job


def test_ingest_pulse_jobs(pulse_jobs, test_repository, push_stored,
failure_classifications, mock_log_parser):
def test_ingest_pulse_jobs(
pulse_jobs, test_repository, push_stored, failure_classifications, mock_log_parser
):
"""
Ingest a job through the JSON Schema validated JobLoader used by Pulse
"""
@ -121,28 +121,39 @@ def test_ingest_pulse_jobs(pulse_jobs, test_repository, push_stored,
assert len(jobs) == 5

assert [job.taskcluster_metadata for job in jobs]
assert set(TaskclusterMetadata.objects.values_list(
'task_id', flat=True)) == set(['IYyscnNMTLuxzna7PNqUJQ',
'XJCbbRQ6Sp-UL1lL-tw5ng',
'ZsSzJQu3Q7q2MfehIBAzKQ',
'bIzVZt9jQQKgvQYD3a2HQw'])
assert set(TaskclusterMetadata.objects.values_list('task_id', flat=True)) == set(
[
'IYyscnNMTLuxzna7PNqUJQ',
'XJCbbRQ6Sp-UL1lL-tw5ng',
'ZsSzJQu3Q7q2MfehIBAzKQ',
'bIzVZt9jQQKgvQYD3a2HQw',
]
)

job_logs = JobLog.objects.filter(job_id=1)
assert job_logs.count() == 2
logs_expected = [{"name": "builds-4h",
"url": "http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/tinderbox-builds/mozilla-inbound-linux64/mozilla-inbound_linux64_spidermonkey-warnaserr-bm57-build1-build352.txt.gz",
"parse_status": 0},
{"name": "errorsummary_json",
"url": "http://mozilla-releng-blobs.s3.amazonaws.com/blobs/Mozilla-Inbound-Non-PGO/sha512/05c7f57df6583c6351c6b49e439e2678e0f43c2e5b66695ea7d096a7519e1805f441448b5ffd4cc3b80b8b2c74b244288fda644f55ed0e226ef4e25ba02ca466",
"parse_status": 0}]
assert [{"name": item.name, "url": item.url, "parse_status": item.status}
for item in job_logs.all()] == logs_expected
logs_expected = [
{
"name": "builds-4h",
"url": "http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/tinderbox-builds/mozilla-inbound-linux64/mozilla-inbound_linux64_spidermonkey-warnaserr-bm57-build1-build352.txt.gz",
"parse_status": 0,
},
{
"name": "errorsummary_json",
"url": "http://mozilla-releng-blobs.s3.amazonaws.com/blobs/Mozilla-Inbound-Non-PGO/sha512/05c7f57df6583c6351c6b49e439e2678e0f43c2e5b66695ea7d096a7519e1805f441448b5ffd4cc3b80b8b2c74b244288fda644f55ed0e226ef4e25ba02ca466",
"parse_status": 0,
},
]
assert [
{"name": item.name, "url": item.url, "parse_status": item.status} for item in job_logs.all()
] == logs_expected

assert JobDetail.objects.count() == 2


def test_ingest_pending_pulse_job(pulse_jobs, push_stored,
failure_classifications, mock_log_parser):
def test_ingest_pending_pulse_job(
pulse_jobs, push_stored, failure_classifications, mock_log_parser
):
"""
Test that ingesting a pending job (1) works and (2) ingests the
taskcluster metadata

@ -167,8 +178,9 @@ def test_ingest_pending_pulse_job(pulse_jobs, push_stored,
assert JobDetail.objects.count() == 2


def test_ingest_pulse_jobs_bad_project(pulse_jobs, test_repository, push_stored,
failure_classifications, mock_log_parser):
def test_ingest_pulse_jobs_bad_project(
pulse_jobs, test_repository, push_stored, failure_classifications, mock_log_parser
):
"""
Test ingesting a pulse job with bad repo will skip, ingest others
"""

@ -204,9 +216,7 @@ def test_ingest_pulse_jobs_with_missing_push(pulse_jobs):
assert Job.objects.count() == 0


def test_transition_pending_running_complete(first_job,
failure_classifications,
mock_log_parser):
def test_transition_pending_running_complete(first_job, failure_classifications, mock_log_parser):
jl = JobLoader()

change_state_result(first_job, jl, "pending", "unknown", "pending", "unknown")
@ -214,45 +224,43 @@ def test_transition_pending_running_complete(first_job,
change_state_result(first_job, jl, "completed", "fail", "completed", "testfailed")


def test_transition_complete_pending_stays_complete(first_job,
failure_classifications,
mock_log_parser):
def test_transition_complete_pending_stays_complete(
first_job, failure_classifications, mock_log_parser
):
jl = JobLoader()

change_state_result(first_job, jl, "completed", "fail", "completed", "testfailed")
change_state_result(first_job, jl, "pending", "unknown", "completed", "testfailed")


def test_transition_complete_running_stays_complete(first_job,
failure_classifications,
mock_log_parser):
def test_transition_complete_running_stays_complete(
first_job, failure_classifications, mock_log_parser
):
jl = JobLoader()

change_state_result(first_job, jl, "completed", "fail", "completed", "testfailed")
change_state_result(first_job, jl, "running", "unknown", "completed", "testfailed")


def test_transition_running_pending_stays_running(first_job,
failure_classifications,
mock_log_parser):
def test_transition_running_pending_stays_running(
first_job, failure_classifications, mock_log_parser
):
jl = JobLoader()

change_state_result(first_job, jl, "running", "unknown", "running", "unknown")
change_state_result(first_job, jl, "pending", "unknown", "running", "unknown")


def test_transition_running_superseded(first_job,
failure_classifications,
mock_log_parser):
def test_transition_running_superseded(first_job, failure_classifications, mock_log_parser):
jl = JobLoader()

change_state_result(first_job, jl, "running", "unknown", "running", "unknown")
change_state_result(first_job, jl, "completed", "superseded", "completed", "superseded")


def test_transition_pending_retry_fail_stays_retry(first_job,
failure_classifications,
mock_log_parser):
def test_transition_pending_retry_fail_stays_retry(
first_job, failure_classifications, mock_log_parser
):
jl = JobLoader()

change_state_result(first_job, jl, "pending", "unknown", "pending", "unknown")

@ -262,8 +270,7 @@ def test_transition_pending_retry_fail_stays_retry(
change_state_result(first_job, jl, "completed", "fail", "completed", "retry")


def test_skip_unscheduled(first_job, failure_classifications,
mock_log_parser):
def test_skip_unscheduled(first_job, failure_classifications, mock_log_parser):
jl = JobLoader()
first_job["state"] = "unscheduled"
jl.process_job(first_job, 'https://firefox-ci-tc.services.mozilla.com')
@ -280,8 +287,11 @@ def change_state_result(test_job, job_loader, new_state, new_result, exp_state,
# pending jobs wouldn't have logs and our store_job_data doesn't
# support it.
del job['logs']
errorsummary_indices = [i for i, item in enumerate(job["jobInfo"].get("links", []))
if item.get("linkText", "").endswith("_errorsummary.log")]
errorsummary_indices = [
i
for i, item in enumerate(job["jobInfo"].get("links", []))
if item.get("linkText", "").endswith("_errorsummary.log")
]
for index in errorsummary_indices:
del job["jobInfo"]["links"][index]


@ -1,9 +1,7 @@
import json

from treeherder.etl.artifact import store_job_artifacts
from treeherder.model.models import (JobDetail,
TextLogError,
TextLogStep)
from treeherder.model.models import JobDetail, TextLogError, TextLogStep


def test_load_long_job_details(test_job):
|
@ -11,56 +9,51 @@ def test_load_long_job_details(test_job):
|
|||
"""Get the field's max_length for the JobDetail model"""
|
||||
return JobDetail._meta.get_field(field).max_length
|
||||
|
||||
(long_title, long_value, long_url) = ('t' * (2 * max_length("title")),
|
||||
'v' * (2 * max_length("value")),
|
||||
'https://' + ('u' * (2 * max_length("url"))))
|
||||
(long_title, long_value, long_url) = (
|
||||
't' * (2 * max_length("title")),
|
||||
'v' * (2 * max_length("value")),
|
||||
'https://' + ('u' * (2 * max_length("url"))),
|
||||
)
|
||||
ji_artifact = {
|
||||
'type': 'json',
|
||||
'name': 'Job Info',
|
||||
'blob': json.dumps({
|
||||
'job_details': [{
|
||||
'title': long_title,
|
||||
'value': long_value,
|
||||
'url': long_url
|
||||
}]
|
||||
}),
|
||||
'job_guid': test_job.guid
|
||||
'blob': json.dumps(
|
||||
{'job_details': [{'title': long_title, 'value': long_value, 'url': long_url}]}
|
||||
),
|
||||
'job_guid': test_job.guid,
|
||||
}
|
||||
store_job_artifacts([ji_artifact])
|
||||
|
||||
assert JobDetail.objects.count() == 1
|
||||
|
||||
jd = JobDetail.objects.first()
|
||||
assert jd.title == long_title[:max_length("title")]
|
||||
assert jd.value == long_value[:max_length("value")]
|
||||
assert jd.url == long_url[:max_length("url")]
|
||||
assert jd.title == long_title[: max_length("title")]
|
||||
assert jd.value == long_value[: max_length("value")]
|
||||
assert jd.url == long_url[: max_length("url")]
|
||||
|
||||
|
||||
def test_load_textlog_summary_twice(test_repository, test_job):
|
||||
text_log_summary_artifact = {
|
||||
'type': 'json',
|
||||
'name': 'text_log_summary',
|
||||
'blob': json.dumps({
|
||||
'step_data': {
|
||||
"steps": [
|
||||
{
|
||||
'name': 'foo',
|
||||
'started': '2016-05-10 12:44:23.103904',
|
||||
'started_linenumber': 8,
|
||||
'finished_linenumber': 10,
|
||||
'finished': '2016-05-10 12:44:23.104394',
|
||||
'result': 'success',
|
||||
'errors': [
|
||||
{
|
||||
"line": '07:51:28 WARNING - foobar',
|
||||
"linenumber": 1587
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
'blob': json.dumps(
|
||||
{
|
||||
'step_data': {
|
||||
"steps": [
|
||||
{
|
||||
'name': 'foo',
|
||||
'started': '2016-05-10 12:44:23.103904',
|
||||
'started_linenumber': 8,
|
||||
'finished_linenumber': 10,
|
||||
'finished': '2016-05-10 12:44:23.104394',
|
||||
'result': 'success',
|
||||
'errors': [{"line": '07:51:28 WARNING - foobar', "linenumber": 1587}],
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}),
|
||||
'job_guid': test_job.guid
|
||||
),
|
||||
'job_guid': test_job.guid,
|
||||
}
|
||||
|
||||
store_job_artifacts([text_log_summary_artifact])
|
||||
|
@ -77,33 +70,35 @@ def test_load_non_ascii_textlog_errors(test_job):
text_log_summary_artifact = {
'type': 'json',
'name': 'text_log_summary',
'blob': json.dumps({
'step_data': {
"steps": [
{
'name': 'foo',
'started': '2016-05-10 12:44:23.103904',
'started_linenumber': 8,
'finished_linenumber': 10,
'finished': '2016-05-10 12:44:23.104394',
'result': 'success',
'errors': [
{
# non-ascii character
"line": '07:51:28 WARNING - \U000000c3',
"linenumber": 1587
},
{
# astral character (i.e. higher than ucs2)
"line": '07:51:29 WARNING - \U0001d400',
"linenumber": 1588
}
]
}
]
'blob': json.dumps(
{
'step_data': {
"steps": [
{
'name': 'foo',
'started': '2016-05-10 12:44:23.103904',
'started_linenumber': 8,
'finished_linenumber': 10,
'finished': '2016-05-10 12:44:23.104394',
'result': 'success',
'errors': [
{
# non-ascii character
"line": '07:51:28 WARNING - \U000000c3',
"linenumber": 1587,
},
{
# astral character (i.e. higher than ucs2)
"line": '07:51:29 WARNING - \U0001d400',
"linenumber": 1588,
},
],
}
]
}
}
}),
'job_guid': test_job.guid
),
'job_guid': test_job.guid,
}
store_job_artifacts([text_log_summary_artifact])

@ -7,23 +7,26 @@ import pytest

from tests.test_utils import create_generic_job
from treeherder.etl.perf import store_performance_artifact
from treeherder.model.models import (Push,
Repository)
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.model.models import Push, Repository
from treeherder.perf.models import (
PerformanceAlert,
PerformanceAlertSummary,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature,
)


def _generate_perf_data_range(test_repository,
generic_reference_data,
create_perf_framework=True,
enable_framework=True,
add_suite_value=False,
extra_suite_metadata=None,
extra_subtest_metadata=None,
reverse_push_range=False):
def _generate_perf_data_range(
test_repository,
generic_reference_data,
create_perf_framework=True,
enable_framework=True,
add_suite_value=False,
extra_suite_metadata=None,
extra_subtest_metadata=None,
reverse_push_range=False,
):
framework_name = "cheezburger"
if create_perf_framework:
PerformanceFramework.objects.create(name=framework_name, enabled=enable_framework)
@ -34,15 +37,15 @@ def _generate_perf_data_range(test_repository,
if reverse_push_range:
push_range = reversed(push_range)

for (i, value) in zip(push_range, [1]*15 + [2]*15):
push_time = datetime.datetime.fromtimestamp(now+i)
for (i, value) in zip(push_range, [1] * 15 + [2] * 15):
push_time = datetime.datetime.fromtimestamp(now + i)
push = Push.objects.create(
repository=test_repository,
revision='abcdefgh%s' % i,
author='foo@bar.com',
time=push_time)
job = create_generic_job('myguid%s' % i, test_repository,
push.id, generic_reference_data)
time=push_time,
)
job = create_generic_job('myguid%s' % i, test_repository, push.id, generic_reference_data)
datum = {
'job_guid': 'fake_job_guid',
'name': 'test',
|
@ -53,48 +56,48 @@ def _generate_perf_data_range(test_repository,
|
|||
{
|
||||
'name': 'cheezburger metrics',
|
||||
'unit': 'ms',
|
||||
'subtests': [
|
||||
{
|
||||
'name': 'test1',
|
||||
'value': value,
|
||||
'unit': 'ms'
|
||||
}
|
||||
]
|
||||
'subtests': [{'name': 'test1', 'value': value, 'unit': 'ms'}],
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
},
|
||||
}
|
||||
if add_suite_value:
|
||||
datum['blob']['suites'][0]['value'] = value
|
||||
if extra_suite_metadata:
|
||||
datum['blob']['suites'][0].update(extra_suite_metadata)
|
||||
if extra_subtest_metadata:
|
||||
datum['blob']['suites'][0]['subtests'][0].update(
|
||||
extra_subtest_metadata)
|
||||
datum['blob']['suites'][0]['subtests'][0].update(extra_subtest_metadata)
|
||||
|
||||
# the perf data adapter expects unserialized performance data
|
||||
submit_datum = copy.copy(datum)
|
||||
submit_datum['blob'] = json.dumps({
|
||||
'performance_data': submit_datum['blob']
|
||||
})
|
||||
submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
|
||||
store_performance_artifact(job, submit_datum)
|
||||
|
||||
|
||||
def _verify_signature(repo_name, framework_name, suitename,
|
||||
testname, option_collection_hash, platform,
|
||||
lower_is_better, extra_opts, measurement_unit,
|
||||
last_updated=None, alert_threshold=None,
|
||||
alert_change_type=None,
|
||||
min_back_window=None, max_back_window=None,
|
||||
fore_window=None):
|
||||
def _verify_signature(
|
||||
repo_name,
|
||||
framework_name,
|
||||
suitename,
|
||||
testname,
|
||||
option_collection_hash,
|
||||
platform,
|
||||
lower_is_better,
|
||||
extra_opts,
|
||||
measurement_unit,
|
||||
last_updated=None,
|
||||
alert_threshold=None,
|
||||
alert_change_type=None,
|
||||
min_back_window=None,
|
||||
max_back_window=None,
|
||||
fore_window=None,
|
||||
):
|
||||
if not extra_opts:
|
||||
extra_options = ''
|
||||
else:
|
||||
extra_options = ' '.join(sorted(extra_opts))
|
||||
|
||||
repository = Repository.objects.get(name=repo_name)
|
||||
signature = PerformanceSignature.objects.get(suite=suitename,
|
||||
test=testname)
|
||||
signature = PerformanceSignature.objects.get(suite=suitename, test=testname)
|
||||
assert str(signature.framework) == framework_name
|
||||
assert signature.option_collection.option_collection_hash == option_collection_hash
|
||||
assert signature.platform.platform == platform
|
||||
|
@ -116,20 +119,14 @@ def _verify_signature(repo_name, framework_name, suitename,
assert signature.last_updated == last_updated


def test_no_performance_framework(test_repository,
failure_classifications,
generic_reference_data):
_generate_perf_data_range(test_repository,
generic_reference_data,
create_perf_framework=False
)
def test_no_performance_framework(test_repository, failure_classifications, generic_reference_data):
_generate_perf_data_range(test_repository, generic_reference_data, create_perf_framework=False)
# no errors, but no data either
assert 0 == PerformanceSignature.objects.all().count()
assert 0 == PerformanceDatum.objects.all().count()


def test_same_signature_multiple_performance_frameworks(test_repository,
perf_job):
def test_same_signature_multiple_performance_frameworks(test_repository, perf_job):
framework_names = ['cheezburger1', 'cheezburger2']
for framework_name in framework_names:
PerformanceFramework.objects.create(name=framework_name, enabled=True)
@ -142,140 +139,149 @@ def test_same_signature_multiple_performance_frameworks(test_repository,
'suites': [
{
'name': 'cheezburger metrics',
'subtests': [
{
'name': 'test1',
'value': 20.0,
'unit': 'ms'
}
]
'subtests': [{'name': 'test1', 'value': 20.0, 'unit': 'ms'}],
}
]
}
],
},
}
# the perf data adapter expects unserialized performance data
submit_datum = copy.copy(datum)
submit_datum['blob'] = json.dumps({
'performance_data': submit_datum['blob']
})
submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})

store_performance_artifact(perf_job, submit_datum)

# we should have 2 performance signature objects, one for each framework
# and one datum for each signature
for framework_name in framework_names:
s = PerformanceSignature.objects.get(framework__name=framework_name,
repository=test_repository,
suite='cheezburger metrics',
test='test1')
s = PerformanceSignature.objects.get(
framework__name=framework_name,
repository=test_repository,
suite='cheezburger metrics',
test='test1',
)
d = PerformanceDatum.objects.get(signature=s)
assert d.value == 20.0


@pytest.mark.parametrize(('alerts_enabled_repository',
'add_suite_value',
'extra_suite_metadata',
'extra_subtest_metadata',
'expected_subtest_alert',
'expected_suite_alert'), [
# just subtest, no metadata, default settings
(True, False, None, {}, True, False),
# just subtest, high alert threshold (so no alert)
(True, False, None, {'alertThreshold': 500.0}, False,
False),
# just subtest, but larger min window size
# (so no alerting)
(True, False, {}, {'minBackWindow': 100,
'maxBackWindow': 100}, False,
False),
# should still alert even if we optionally
# use a large maximum back window
(True, False, None, {'minBackWindow': 12,
'maxBackWindow': 100}, True,
False),
# summary+subtest, no metadata, default settings
(True, True, {}, {}, False, True),
# summary+subtest, high alert threshold
# (so no alert)
(True, True, {'alertThreshold': 500.0}, {}, False,
False),
# summary+subtest, no metadata, no alerting on summary
(True, True, {'shouldAlert': False}, {}, False,
False),
# summary+subtest, no metadata, no alerting on
# summary, alerting on subtest
(True, True, {'shouldAlert': False},
{'shouldAlert': True}, True, False),
# summary+subtest, no metadata on summary, alerting
# override on subtest
(True, True, {}, {'shouldAlert': True}, True, True),
# summary+subtest, alerting override on subtest +
# summary
(True, True, {'shouldAlert': True},
{'shouldAlert': True}, True, True),
# summary+subtest, alerting override on subtest +
# summary -- but alerts disabled
(False, True, {'shouldAlert': True},
{'shouldAlert': True}, False, False),
# summary+subtest, alerting override on subtest +
# summary, but using absolute change so shouldn't
# alert
(True, True,
{'shouldAlert': True, 'alertChangeType': 'absolute'},
{'shouldAlert': True, 'alertChangeType': 'absolute'},
False, False),
# summary + subtest, only subtest is absolute so
# summary should alert
(True, True,
{'shouldAlert': True},
{'shouldAlert': True, 'alertChangeType': 'absolute'},
False, True),
])
def test_alert_generation(test_repository, test_issue_tracker,
failure_classifications, generic_reference_data,
alerts_enabled_repository,
add_suite_value, extra_suite_metadata,
extra_subtest_metadata, expected_subtest_alert,
expected_suite_alert):
@pytest.mark.parametrize(
(
'alerts_enabled_repository',
'add_suite_value',
'extra_suite_metadata',
'extra_subtest_metadata',
'expected_subtest_alert',
'expected_suite_alert',
),
[
# just subtest, no metadata, default settings
(True, False, None, {}, True, False),
# just subtest, high alert threshold (so no alert)
(True, False, None, {'alertThreshold': 500.0}, False, False),
# just subtest, but larger min window size
# (so no alerting)
(True, False, {}, {'minBackWindow': 100, 'maxBackWindow': 100}, False, False),
# should still alert even if we optionally
# use a large maximum back window
(True, False, None, {'minBackWindow': 12, 'maxBackWindow': 100}, True, False),
# summary+subtest, no metadata, default settings
(True, True, {}, {}, False, True),
# summary+subtest, high alert threshold
# (so no alert)
(True, True, {'alertThreshold': 500.0}, {}, False, False),
# summary+subtest, no metadata, no alerting on summary
(True, True, {'shouldAlert': False}, {}, False, False),
# summary+subtest, no metadata, no alerting on
# summary, alerting on subtest
(True, True, {'shouldAlert': False}, {'shouldAlert': True}, True, False),
# summary+subtest, no metadata on summary, alerting
# override on subtest
(True, True, {}, {'shouldAlert': True}, True, True),
# summary+subtest, alerting override on subtest +
# summary
(True, True, {'shouldAlert': True}, {'shouldAlert': True}, True, True),
# summary+subtest, alerting override on subtest +
# summary -- but alerts disabled
(False, True, {'shouldAlert': True}, {'shouldAlert': True}, False, False),
# summary+subtest, alerting override on subtest +
# summary, but using absolute change so shouldn't
# alert
(
True,
True,
{'shouldAlert': True, 'alertChangeType': 'absolute'},
{'shouldAlert': True, 'alertChangeType': 'absolute'},
False,
False,
),
# summary + subtest, only subtest is absolute so
# summary should alert
(
True,
True,
{'shouldAlert': True},
{'shouldAlert': True, 'alertChangeType': 'absolute'},
False,
True,
),
],
)
def test_alert_generation(
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
alerts_enabled_repository,
add_suite_value,
extra_suite_metadata,
extra_subtest_metadata,
expected_subtest_alert,
expected_suite_alert,
):
test_repository.performance_alerts_enabled = alerts_enabled_repository
test_repository.save()

_generate_perf_data_range(test_repository,
generic_reference_data,
add_suite_value=add_suite_value,
extra_suite_metadata=extra_suite_metadata,
extra_subtest_metadata=extra_subtest_metadata)
_generate_perf_data_range(
test_repository,
generic_reference_data,
add_suite_value=add_suite_value,
extra_suite_metadata=extra_suite_metadata,
extra_subtest_metadata=extra_subtest_metadata,
)

# validate that the signatures have the expected properties
_verify_signature(test_repository.name,
'cheezburger',
'cheezburger metrics',
'test1',
'my_option_hash',
'my_platform',
True,
None,
'ms',
alert_threshold=extra_subtest_metadata.get('alertThreshold'),
alert_change_type=extra_subtest_metadata.get('alertChangeType'),
min_back_window=extra_subtest_metadata.get('minBackWindow'),
max_back_window=extra_subtest_metadata.get('maxBackWindow'),
fore_window=extra_subtest_metadata.get('foreWindow'))
_verify_signature(
test_repository.name,
'cheezburger',
'cheezburger metrics',
'test1',
'my_option_hash',
'my_platform',
True,
None,
'ms',
alert_threshold=extra_subtest_metadata.get('alertThreshold'),
alert_change_type=extra_subtest_metadata.get('alertChangeType'),
min_back_window=extra_subtest_metadata.get('minBackWindow'),
max_back_window=extra_subtest_metadata.get('maxBackWindow'),
fore_window=extra_subtest_metadata.get('foreWindow'),
)
if add_suite_value:
_verify_signature(test_repository.name,
'cheezburger',
'cheezburger metrics',
'',
'my_option_hash',
'my_platform',
True,
None,
'ms',
alert_threshold=extra_suite_metadata.get('alertThreshold'),
alert_change_type=extra_suite_metadata.get('alertChangeType'),
min_back_window=extra_suite_metadata.get('minBackWindow'),
max_back_window=extra_suite_metadata.get('maxBackWindow'),
fore_window=extra_suite_metadata.get('foreWindow'))
_verify_signature(
test_repository.name,
'cheezburger',
'cheezburger metrics',
'',
'my_option_hash',
'my_platform',
True,
None,
'ms',
alert_threshold=extra_suite_metadata.get('alertThreshold'),
alert_change_type=extra_suite_metadata.get('alertChangeType'),
min_back_window=extra_suite_metadata.get('minBackWindow'),
max_back_window=extra_suite_metadata.get('maxBackWindow'),
fore_window=extra_suite_metadata.get('foreWindow'),
)

expected_num_alerts = sum([expected_suite_alert, expected_subtest_alert])
@ -310,38 +316,33 @@ def test_alert_generation(test_repository, test_issue_tracker,
|
|||
assert alert.amount_pct == 100
|
||||
|
||||
|
||||
def test_alert_generation_repo_no_alerts(test_repository,
|
||||
failure_classifications,
|
||||
generic_reference_data):
|
||||
def test_alert_generation_repo_no_alerts(
|
||||
test_repository, failure_classifications, generic_reference_data
|
||||
):
|
||||
# validates that no alerts generated on "try" repos
|
||||
test_repository.performance_alerts_enabled = False
|
||||
test_repository.save()
|
||||
|
||||
_generate_perf_data_range(test_repository,
|
||||
generic_reference_data)
|
||||
_generate_perf_data_range(test_repository, generic_reference_data)
|
||||
|
||||
assert 0 == PerformanceAlert.objects.all().count()
|
||||
assert 0 == PerformanceAlertSummary.objects.all().count()
|
||||
|
||||
|
||||
def test_framework_not_enabled(test_repository,
|
||||
failure_classifications,
|
||||
generic_reference_data):
|
||||
def test_framework_not_enabled(test_repository, failure_classifications, generic_reference_data):
|
||||
# The field enabled has been defaulted to 'False'
|
||||
_generate_perf_data_range(test_repository,
|
||||
generic_reference_data,
|
||||
create_perf_framework=True,
|
||||
enable_framework=False)
|
||||
_generate_perf_data_range(
|
||||
test_repository, generic_reference_data, create_perf_framework=True, enable_framework=False
|
||||
)
|
||||
|
||||
assert 0 == PerformanceSignature.objects.all().count()
|
||||
assert 0 == PerformanceDatum.objects.all().count()
|
||||
|
||||
|
||||
def test_last_updated(test_repository, test_issue_tracker,
|
||||
failure_classifications, generic_reference_data):
|
||||
_generate_perf_data_range(test_repository,
|
||||
generic_reference_data,
|
||||
reverse_push_range=True)
|
||||
def test_last_updated(
|
||||
test_repository, test_issue_tracker, failure_classifications, generic_reference_data
|
||||
):
|
||||
_generate_perf_data_range(test_repository, generic_reference_data, reverse_push_range=True)
|
||||
assert PerformanceSignature.objects.count() == 1
|
||||
signature = PerformanceSignature.objects.first()
|
||||
assert signature.last_updated == max(Push.objects.values_list('time', flat=True))
|
||||
|
|
|
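[Editor's note, not part of the commit: the hunks above show black's two-step rule for calls. It first tries the whole call on one line; only when that exceeds the line limit does it put one argument per line at a single indent level, adding a trailing comma. Lines of up to roughly 99 characters survive this diff untouched, so the repo appears to run black with line-length = 100 rather than the default 88; that figure is an inference, not something the diff states. A minimal sketch with hypothetical names:]

    # hand-aligned continuation style, as in the removed lines
    process_range(repo,
                  reference_data,
                  reverse=True)

    # black, when the joined call fits the limit: collapse to one line
    process_range(repo, reference_data, reverse=True)

    # black, when it does not fit: explode, one argument per line, trailing comma
    process_range(
        repo,
        reference_data,
        extra_metadata=extra_metadata,
        even_more_metadata=even_more_metadata,
    )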
@ -9,9 +9,7 @@ from tests.etl.test_perf_data_adapters import _verify_signature
from tests.test_utils import create_generic_job
from treeherder.etl.perf import store_performance_artifact
from treeherder.model.models import Push
from treeherder.perf.models import (PerformanceDatum,
                                    PerformanceFramework,
                                    PerformanceSignature)
from treeherder.perf.models import PerformanceDatum, PerformanceFramework, PerformanceSignature

FRAMEWORK_NAME = 'cheezburger'
MEASUREMENT_UNIT = 'ms'

@ -38,48 +36,32 @@ def sample_perf_artifact():
                    'name': 'test1',
                    'value': 20.0,
                    'unit': MEASUREMENT_UNIT,
                    'lowerIsBetter': True
                    'lowerIsBetter': True,
                },
                {
                    'name': 'test2',
                    'value': 30.0,
                    'unit': MEASUREMENT_UNIT,
                    'lowerIsBetter': False
                    'lowerIsBetter': False,
                },
                {
                    'name': 'test3',
                    'value': 40.0,
                    'unit': MEASUREMENT_UNIT,
                }
            ]
                {'name': 'test3', 'value': 40.0, 'unit': MEASUREMENT_UNIT,},
            ],
        },
        {
            'name': 'cheezburger metrics 2',
            'lowerIsBetter': False,
            'value': 10.0,
            'unit': MEASUREMENT_UNIT,
            'subtests': [
                {
                    'name': 'test1',
                    'value': 20.0,
                    'unit': MEASUREMENT_UNIT,
                }
            ]
            'subtests': [{'name': 'test1', 'value': 20.0, 'unit': MEASUREMENT_UNIT,}],
        },
        {
            'name': 'cheezburger metrics 3',
            'value': 10.0,
            'unit': MEASUREMENT_UNIT,
            'subtests': [
                {
                    'name': 'test1',
                    'value': 20.0,
                    'unit': MEASUREMENT_UNIT
                }
            ]
        }
    ]
}
            'subtests': [{'name': 'test1', 'value': 20.0, 'unit': MEASUREMENT_UNIT}],
        },
    ],
},
}

@ -99,28 +81,24 @@ def sample_perf_artifact_with_new_unit():
            'value': 10.0,
            'unit': UPDATED_MEASUREMENT_UNIT,
            'subtests': [
                {
                    'name': 'test1',
                    'value': 20.0,
                    'unit': UPDATED_MEASUREMENT_UNIT,
                    'lowerIsBetter': True
                },
                {
                    'name': 'test2',
                    'value': 30.0,
                    'unit': MEASUREMENT_UNIT,
                    'lowerIsBetter': False
                },
                {
                    'name': 'test3',
                    'value': 40.0,
                    'unit': MEASUREMENT_UNIT,
                }
            ]
        }
    ]
}
}
                {
                    'name': 'test1',
                    'value': 20.0,
                    'unit': UPDATED_MEASUREMENT_UNIT,
                    'lowerIsBetter': True,
                },
                {
                    'name': 'test2',
                    'value': 30.0,
                    'unit': MEASUREMENT_UNIT,
                    'lowerIsBetter': False,
                },
                {'name': 'test3', 'value': 40.0, 'unit': MEASUREMENT_UNIT,},
            ],
        }
    ],
},
}


@pytest.fixture

@ -130,13 +108,14 @@ def later_perf_push(test_repository):
        repository=test_repository,
        revision='1234abcd12',
        author='foo@bar.com',
        time=later_timestamp)
        time=later_timestamp,
    )


def _verify_datum(suitename, testname, value, push_timestamp):
    datum = PerformanceDatum.objects.get(
        signature=PerformanceSignature.objects.get(suite=suitename,
                                                   test=testname))
        signature=PerformanceSignature.objects.get(suite=suitename, test=testname)
    )
    assert datum.value == value
    assert datum.push_timestamp == push_timestamp

@ -145,15 +124,19 @@ def _prepare_test_data(datum):
    PerformanceFramework.objects.get_or_create(name=FRAMEWORK_NAME, enabled=True)
    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps({
        'performance_data': submit_datum['blob']
    })
    submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
    perf_datum = datum['blob']
    return perf_datum, submit_datum


def test_ingest_workflow(test_repository,
                         perf_push, later_perf_push, perf_job, generic_reference_data, sample_perf_artifact):
def test_ingest_workflow(
    test_repository,
    perf_push,
    later_perf_push,
    perf_job,
    generic_reference_data,
    sample_perf_artifact,
):
    perf_datum, submit_datum = _prepare_test_data(sample_perf_artifact)

    store_performance_artifact(perf_job, submit_datum)

@ -164,93 +147,104 @@ def test_ingest_workflow(test_repository,
    assert FRAMEWORK_NAME == framework.name
    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'],
                          '',
                          'my_option_hash',
                          'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'),
                          suite.get('unit'),
                          perf_push.time)
        _verify_signature(
            test_repository.name,
            perf_datum['framework']['name'],
            suite['name'],
            '',
            'my_option_hash',
            'my_platform',
            suite.get('lowerIsBetter', True),
            suite.get('extraOptions'),
            suite.get('unit'),
            perf_push.time,
        )
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'],
                              subtest['name'],
                              'my_option_hash',
                              'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'),
                              suite.get('unit'),
                              perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)
            _verify_signature(
                test_repository.name,
                perf_datum['framework']['name'],
                suite['name'],
                subtest['name'],
                'my_option_hash',
                'my_platform',
                subtest.get('lowerIsBetter', True),
                suite.get('extraOptions'),
                suite.get('unit'),
                perf_push.time,
            )
            _verify_datum(suite['name'], subtest['name'], subtest['value'], perf_push.time)


def test_hash_remains_unchanged(test_repository, perf_job, sample_perf_artifact):
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, submit_datum)

    summary_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='')
    summary_signature = PerformanceSignature.objects.get(suite='cheezburger metrics', test='')
    # Ensure we don't inadvertently change the way we generate signature hashes.
    assert summary_signature.signature_hash == 'f451f0c9000a7f99e5dc2f05792bfdb0e11d0cac'
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash', flat=True)
        parent_signature=summary_signature
    ).values_list('signature_hash', flat=True)
    assert len(subtest_signatures) == 3


def test_timestamp_can_be_updated(test_repository, perf_job, later_perf_push, generic_reference_data,
                                  sample_perf_artifact):
def test_timestamp_can_be_updated(
    test_repository, perf_job, later_perf_push, generic_reference_data, sample_perf_artifact
):
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, submit_datum)

    # send another datum, a little later, verify that signature is changed accordingly
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    later_job = create_generic_job(
        'lateguid', test_repository, later_perf_push.id, generic_reference_data
    )
    store_performance_artifact(later_job, submit_datum)

    signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics',
        test='test1')
    signature = PerformanceSignature.objects.get(suite='cheezburger metrics', test='test1')
    assert signature.last_updated == later_perf_push.time


def test_measurement_unit_can_be_updated(test_repository, later_perf_push, perf_job, generic_reference_data,
                                         sample_perf_artifact, sample_perf_artifact_with_new_unit):
def test_measurement_unit_can_be_updated(
    test_repository,
    later_perf_push,
    perf_job,
    generic_reference_data,
    sample_perf_artifact,
    sample_perf_artifact_with_new_unit,
):
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, submit_datum)

    _, updated_submit_datum = _prepare_test_data(sample_perf_artifact_with_new_unit)
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    later_job = create_generic_job(
        'lateguid', test_repository, later_perf_push.id, generic_reference_data
    )
    store_performance_artifact(later_job, updated_submit_datum)

    summary_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='')
    summary_signature = PerformanceSignature.objects.get(suite='cheezburger metrics', test='')
    updated_subtest_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics',
        test='test1')
        suite='cheezburger metrics', test='test1'
    )
    assert summary_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
    assert updated_subtest_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT

    # no side effects when parent/sibling signatures
    # change measurement units
    not_changed_subtest_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics',
        test='test2')
        suite='cheezburger metrics', test='test2'
    )
    assert not_changed_subtest_signature.measurement_unit == MEASUREMENT_UNIT


def test_changing_extra_options_decouples_perf_signatures(test_repository, later_perf_push, perf_job,
                                                          generic_reference_data, sample_perf_artifact):
def test_changing_extra_options_decouples_perf_signatures(
    test_repository, later_perf_push, perf_job, generic_reference_data, sample_perf_artifact
):
    updated_perf_artifact = copy.deepcopy(sample_perf_artifact)
    updated_perf_artifact['blob']['suites'][0]['extraOptions'] = ['different-extra-options']
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    later_job = create_generic_job(
        'lateguid', test_repository, later_perf_push.id, generic_reference_data
    )
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    _, updated_submit_datum = _prepare_test_data(updated_perf_artifact)
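[Editor's note, not part of the commit: several collapsed one-liners above keep a trailing comma inside the braces, e.g. {'name': 'test3', 'value': 40.0, 'unit': MEASUREMENT_UNIT,}. That ",}" is a quirk of the black release in use here (likely a 19.x version, an assumption): when collapsing a multi-line literal it did not strip the old trailing comma. From black 20.8b0 onward the "magic trailing comma" inverts the behaviour, and a pre-existing trailing comma forces the literal to stay exploded:]

    # with a magic trailing comma, newer black keeps this form instead of
    # collapsing it ('ms' stands in for MEASUREMENT_UNIT here)
    subtest = {
        'name': 'test3',
        'value': 40.0,
        'unit': 'ms',
    }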
@ -4,33 +4,52 @@ import jsonschema
import pytest


@pytest.mark.parametrize(('suite_value', 'test_value', 'expected_fail'),
                         [({}, {}, True),
                          ({'value': 1234}, {}, True),
                          ({}, {'value': 1234}, False),
                          ({'value': 1234}, {'value': 1234}, False),
                          ({'value': float('inf')}, {}, True),
                          ({}, {'value': float('inf')}, True),
                          ({'value': 1234,
                            'extraOptions': [
                                # has >45 characters
                                ['android-api-53211-with-google-play-services-and-some-random-other-extra-information']
                            ]}, {'value': 1234}, True),
                          ({'value': 1234,
                            'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8', '9']}, {'value': 1234}, True),
                          ({'value': 1234,
                            'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8']}, {'value': 1234}, False)])
@pytest.mark.parametrize(
    ('suite_value', 'test_value', 'expected_fail'),
    [
        ({}, {}, True),
        ({'value': 1234}, {}, True),
        ({}, {'value': 1234}, False),
        ({'value': 1234}, {'value': 1234}, False),
        ({'value': float('inf')}, {}, True),
        ({}, {'value': float('inf')}, True),
        (
            {
                'value': 1234,
                'extraOptions': [
                    # has >45 characters
                    [
                        'android-api-53211-with-google-play-services-and-some-random-other-extra-information'
                    ]
                ],
            },
            {'value': 1234},
            True,
        ),
        (
            {'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8', '9']},
            {'value': 1234},
            True,
        ),
        (
            {'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8']},
            {'value': 1234},
            False,
        ),
    ],
)
def test_perf_schema(suite_value, test_value, expected_fail):
    with open('schemas/performance-artifact.json') as f:
        perf_schema = json.load(f)

    datum = {
        "framework": {"name": "talos"}, "suites": [{
            "name": "basic_compositor_video",
            "subtests": [{
                "name": "240p.120fps.mp4_scale_fullscreen_startup"
            }]
        }]
        "framework": {"name": "talos"},
        "suites": [
            {
                "name": "basic_compositor_video",
                "subtests": [{"name": "240p.120fps.mp4_scale_fullscreen_startup"}],
            }
        ],
    }
    datum['suites'][0].update(suite_value)
    datum['suites'][0]['subtests'][0].update(test_value)
@ -5,11 +5,13 @@ import os
import pytest
import responses

from treeherder.etl.push_loader import (GithubPullRequestTransformer,
                                        GithubPushTransformer,
                                        HgPushTransformer,
                                        PulsePushError,
                                        PushLoader)
from treeherder.etl.push_loader import (
    GithubPullRequestTransformer,
    GithubPushTransformer,
    HgPushTransformer,
    PulsePushError,
    PushLoader,
)
from treeherder.model.models import Push


@ -46,26 +48,22 @@ def transformed_hg_push(sample_data):
@pytest.fixture
def mock_github_pr_commits(activate_responses):
    tests_folder = os.path.dirname(os.path.dirname(__file__))
    path = os.path.join(
        tests_folder,
        "sample_data/pulse_consumer",
        "github_pr_commits.json"
    )
    path = os.path.join(tests_folder, "sample_data/pulse_consumer", "github_pr_commits.json")
    with open(path) as f:
        mocked_content = f.read()
    responses.add(responses.GET, "https://api.github.com/repos/mozilla/test_treeherder/pulls/1692/commits",
                  body=mocked_content, status=200,
                  content_type='application/json')
    responses.add(
        responses.GET,
        "https://api.github.com/repos/mozilla/test_treeherder/pulls/1692/commits",
        body=mocked_content,
        status=200,
        content_type='application/json',
    )


@pytest.fixture
def mock_github_push_compare(activate_responses):
    tests_folder = os.path.dirname(os.path.dirname(__file__))
    path = os.path.join(
        tests_folder,
        "sample_data/pulse_consumer",
        "github_push_compare.json"
    )
    path = os.path.join(tests_folder, "sample_data/pulse_consumer", "github_push_compare.json")
    with open(path) as f:
        mocked_content = json.load(f)

@ -74,35 +72,46 @@ def mock_github_push_compare(activate_responses):
        "https://api.github.com/repos/mozilla-mobile/android-components/compare/"
        "7285afe57ae6207fdb5d6db45133dac2053b7820..."
        "5fdb785b28b356f50fc1d9cb180d401bb03fc1f1",
        json=mocked_content[0], status=200, match_querystring=False,
        content_type='application/json')
        json=mocked_content[0],
        status=200,
        match_querystring=False,
        content_type='application/json',
    )
    responses.add(
        responses.GET,
        "https://api.github.com/repos/servo/servo/compare/"
        "4c25e02f26f7536edbf23a360d56604fb9507378..."
        "ad9bfc2a62b70b9f3dbb1c3a5969f30bacce3d74",
        json=mocked_content[1], status=200, match_querystring=False,
        content_type='application/json')
        json=mocked_content[1],
        status=200,
        match_querystring=False,
        content_type='application/json',
    )


@pytest.fixture
def mock_hg_push_commits(activate_responses):
    tests_folder = os.path.dirname(os.path.dirname(__file__))
    path = os.path.join(
        tests_folder,
        "sample_data/pulse_consumer",
        "hg_push_commits.json"
    )
    path = os.path.join(tests_folder, "sample_data/pulse_consumer", "hg_push_commits.json")
    with open(path) as f:
        mocked_content = f.read()
    responses.add(responses.GET, "https://hg.mozilla.org/try/json-pushes",
                  body=mocked_content, status=200, match_querystring=False,
                  content_type='application/json')
    responses.add(
        responses.GET,
        "https://hg.mozilla.org/try/json-pushes",
        body=mocked_content,
        status=200,
        match_querystring=False,
        content_type='application/json',
    )


@pytest.mark.parametrize("exchange, transformer_class", [
    ("exchange/taskcluster-github/v1/push", GithubPushTransformer),
    ("exchange/taskcluster-github/v1/pull-request", GithubPullRequestTransformer)])
@pytest.mark.parametrize(
    "exchange, transformer_class",
    [
        ("exchange/taskcluster-github/v1/push", GithubPushTransformer),
        ("exchange/taskcluster-github/v1/pull-request", GithubPullRequestTransformer),
    ],
)
def test_get_transformer_class(exchange, transformer_class):
    rsl = PushLoader()
    assert rsl.get_transformer_class(exchange) == transformer_class

@ -114,22 +123,23 @@ def test_unsupported_exchange():
        rsl.get_transformer_class("meh")


def test_ingest_github_pull_request(test_repository, github_pr, transformed_github_pr,
                                    mock_github_pr_commits):
def test_ingest_github_pull_request(
    test_repository, github_pr, transformed_github_pr, mock_github_pr_commits
):
    xformer = GithubPullRequestTransformer(github_pr)
    push = xformer.transform(test_repository.name)
    assert transformed_github_pr == push


def test_ingest_github_push(test_repository, github_push, transformed_github_push,
                            mock_github_push_compare):
def test_ingest_github_push(
    test_repository, github_push, transformed_github_push, mock_github_push_compare
):
    xformer = GithubPushTransformer(github_push[0]["payload"])
    push = xformer.transform(test_repository.name)
    assert transformed_github_push == push


def test_ingest_hg_push(test_repository, hg_push, transformed_hg_push,
                        mock_hg_push_commits):
def test_ingest_hg_push(test_repository, hg_push, transformed_hg_push, mock_hg_push_commits):
    xformer = HgPushTransformer(hg_push)
    push = xformer.transform(test_repository.name)
    assert transformed_hg_push == push

@ -140,7 +150,9 @@ def test_ingest_hg_push_good_repo(hg_push, test_repository, mock_hg_push_commits
    """Test graceful handling of an unknown HG repo"""
    hg_push["payload"]["repo_url"] = "https://hg.mozilla.org/mozilla-central"
    assert Push.objects.count() == 0
    PushLoader().process(hg_push, "exchange/hgpushes/v1", "https://firefox-ci-tc.services.mozilla.com")
    PushLoader().process(
        hg_push, "exchange/hgpushes/v1", "https://firefox-ci-tc.services.mozilla.com"
    )
    assert Push.objects.count() == 1


@ -148,7 +160,9 @@ def test_ingest_hg_push_good_repo(hg_push, test_repository, mock_hg_push_commits
def test_ingest_hg_push_bad_repo(hg_push):
    """Test graceful handling of an unknown HG repo"""
    hg_push["payload"]["repo_url"] = "https://bad.repo.com"
    PushLoader().process(hg_push, "exchange/hgpushes/v1", "https://firefox-ci-tc.services.mozilla.com")
    PushLoader().process(
        hg_push, "exchange/hgpushes/v1", "https://firefox-ci-tc.services.mozilla.com"
    )
    assert Push.objects.count() == 0


@ -156,34 +170,48 @@ def test_ingest_hg_push_bad_repo(hg_push):
def test_ingest_github_push_bad_repo(github_push):
    """Test graceful handling of an unknown GH repo"""
    github_push[0]["payload"]["details"]["event.head.repo.url"] = "https://bad.repo.com"
    PushLoader().process(github_push[0]["payload"], github_push[0]["exchange"], "https://firefox-ci-tc.services.mozilla.com")
    PushLoader().process(
        github_push[0]["payload"],
        github_push[0]["exchange"],
        "https://firefox-ci-tc.services.mozilla.com",
    )
    assert Push.objects.count() == 0


@pytest.mark.django_db
def test_ingest_github_push_merge_commit(github_push, test_repository, mock_github_push_compare):
    """Test a merge push which will require hitting the network for the right info"""
    test_repository.url = github_push[1]["payload"]["details"]["event.head.repo.url"].replace(".git", "")
    test_repository.url = github_push[1]["payload"]["details"]["event.head.repo.url"].replace(
        ".git", ""
    )
    test_repository.branch = github_push[1]["payload"]["details"]["event.base.repo.branch"]
    test_repository.save()
    PushLoader().process(github_push[1]["payload"], github_push[1]["exchange"], "https://firefox-ci-tc.services.mozilla.com")
    PushLoader().process(
        github_push[1]["payload"],
        github_push[1]["exchange"],
        "https://firefox-ci-tc.services.mozilla.com",
    )
    assert Push.objects.count() == 1


@pytest.mark.django_db
@pytest.mark.parametrize("branch, expected_pushes", [
    ("master", 1),
    ("bar", 1),
    ("baz", 0),
    ("foo", 1),
])
def test_ingest_github_push_comma_separated_branches(branch, expected_pushes, github_push, test_repository,
                                                     mock_github_push_compare):
@pytest.mark.parametrize(
    "branch, expected_pushes", [("master", 1), ("bar", 1), ("baz", 0), ("foo", 1),]
)
def test_ingest_github_push_comma_separated_branches(
    branch, expected_pushes, github_push, test_repository, mock_github_push_compare
):
    """Test a repository accepting pushes for multiple branches"""
    test_repository.url = github_push[0]["payload"]["details"]["event.head.repo.url"].replace(".git", "")
    test_repository.url = github_push[0]["payload"]["details"]["event.head.repo.url"].replace(
        ".git", ""
    )
    test_repository.branch = "master,foo,bar"
    test_repository.save()
    github_push[0]["payload"]["details"]["event.base.repo.branch"] = branch
    assert Push.objects.count() == 0
    PushLoader().process(github_push[0]["payload"], github_push[0]["exchange"], "https://firefox-ci-tc.services.mozilla.com")
    PushLoader().process(
        github_push[0]["payload"],
        github_push[0]["exchange"],
        "https://firefox-ci-tc.services.mozilla.com",
    )
    assert Push.objects.count() == expected_pushes
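[Editor's note, not part of the commit: quote styles are left untouched throughout this diff; single-quoted strings such as 'application/json' sit next to double-quoted ones such as "master,foo,bar". Stock black normalizes string quotes to double quotes, so this suggests the repo configures skip-string-normalization = true; that is an inference from the diff, not something it states.]

    # under black's defaults both lines below would come out double-quoted;
    # with skip-string-normalization the original quoting survives
    content_type = 'application/json'
    branch = "master,foo,bar"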
@ -5,21 +5,23 @@ import responses
from django.core.cache import cache

from treeherder.etl.pushlog import HgPushlogProcess
from treeherder.model.models import (Commit,
                                     Push)
from treeherder.model.models import Commit, Push


def test_ingest_hg_pushlog(test_repository, test_base_dir,
                           activate_responses):
def test_ingest_hg_pushlog(test_repository, test_base_dir, activate_responses):
    """ingesting a number of pushes should populate push and revisions"""

    pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
    with open(pushlog_path) as f:
        pushlog_content = f.read()
    pushlog_fake_url = "http://www.thisismypushlog.com"
    responses.add(responses.GET, pushlog_fake_url,
                  body=pushlog_content, status=200,
                  content_type='application/json')
    responses.add(
        responses.GET,
        pushlog_fake_url,
        body=pushlog_content,
        status=200,
        content_type='application/json',
    )

    process = HgPushlogProcess()

@ -30,8 +32,7 @@ def test_ingest_hg_pushlog(test_repository, test_base_dir,
    assert Commit.objects.count() == 15


def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir,
                                          activate_responses):
def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activate_responses):
    """test that trying to ingest a push already stored doesn't affect
    all the pushes in the request,
    e.g. trying to store [A,B] with A already stored, B will be stored"""

@ -46,11 +47,14 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir,

    # store the first push only
    first_push_json = json.dumps({"lastpushid": 1, "pushes": {"1": first_push}})
    responses.add(responses.GET, pushlog_fake_url,
                  body=first_push_json, status=200,
                  content_type='application/json',
                  match_querystring=True,
                  )
    responses.add(
        responses.GET,
        pushlog_fake_url,
        body=first_push_json,
        status=200,
        content_type='application/json',
        match_querystring=True,
    )

    process = HgPushlogProcess()
    process.run(pushlog_fake_url, test_repository.name)

@ -66,8 +70,10 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir,
        responses.GET,
        pushlog_fake_url + "&startID=1",
        body=first_and_second_push_json,
        status=200, content_type='application/json',
        match_querystring=True)
        status=200,
        content_type='application/json',
        match_querystring=True,
    )

    process = HgPushlogProcess()

@ -76,20 +82,22 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir,
    assert Push.objects.count() == 2


def test_ingest_hg_pushlog_cache_last_push(test_repository,
                                           test_base_dir,
                                           activate_responses):
def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activate_responses):
    """
    ingesting a number of pushes should cache the top revision of the last push
    """

    pushlog_path = os.path.join(test_base_dir, 'sample_data',
                                'hg_pushlog.json')
    pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
    with open(pushlog_path) as f:
        pushlog_content = f.read()
    pushlog_fake_url = "http://www.thisismypushlog.com"
    responses.add(responses.GET, pushlog_fake_url, body=pushlog_content,
                  status=200, content_type='application/json')
    responses.add(
        responses.GET,
        pushlog_fake_url,
        body=pushlog_content,
        status=200,
        content_type='application/json',
    )

    process = HgPushlogProcess()
    process.run(pushlog_fake_url, test_repository.name)

@ -102,8 +110,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository,
    assert cache.get(cache_key) == max_push_id


def test_empty_json_pushes(test_repository, test_base_dir,
                           activate_responses):
def test_empty_json_pushes(test_repository, test_base_dir, activate_responses):
    """
    Gracefully handle getting an empty list of pushes from json-pushes

@ -113,11 +120,14 @@ def test_empty_json_pushes(test_repository, test_base_dir,

    # store the first push only
    empty_push_json = json.dumps({"lastpushid": 123, "pushes": {}})
    responses.add(responses.GET, pushlog_fake_url,
                  body=empty_push_json, status=200,
                  content_type='application/json',
                  match_querystring=True,
                  )
    responses.add(
        responses.GET,
        pushlog_fake_url,
        body=empty_push_json,
        status=200,
        content_type='application/json',
        match_querystring=True,
    )

    process = HgPushlogProcess()
    process.run(pushlog_fake_url, test_repository.name)
@ -1,8 +1,10 @@
import responses

from treeherder.etl.runnable_jobs import (RUNNABLE_JOBS_URL,
                                          TASKCLUSTER_INDEX_URL,
                                          _taskcluster_runnable_jobs)
from treeherder.etl.runnable_jobs import (
    RUNNABLE_JOBS_URL,
    TASKCLUSTER_INDEX_URL,
    _taskcluster_runnable_jobs,
)

TASK_ID = 'AFq3FRt4TyiTwIN7fUqOQg'
CONTENT1 = {'taskId': TASK_ID}

@ -19,11 +21,11 @@ API_RETURN = {
    'platform_option': 'opt',
    'ref_data_name': JOB_NAME,
    'state': 'runnable',
    'result': 'runnable'
    'result': 'runnable',
}
RUNNABLE_JOBS_CONTENTS = {
    JOB_NAME: {
        'collection': {'opt': True},
        'collection': {'opt': True},
        'groupName': API_RETURN['job_group_name'],
        'groupSymbol': API_RETURN['job_group_symbol'],
        'platform': API_RETURN['platform'],

@ -39,10 +41,20 @@ def test_taskcluster_runnable_jobs(test_repository):
    """
    repo = test_repository.name

    responses.add(responses.GET, TASKCLUSTER_INDEX_URL % repo,
                  json=CONTENT1, match_querystring=True, status=200)
    responses.add(responses.GET, RUNNABLE_JOBS_URL,
                  json=RUNNABLE_JOBS_CONTENTS, match_querystring=True, status=200)
    responses.add(
        responses.GET,
        TASKCLUSTER_INDEX_URL % repo,
        json=CONTENT1,
        match_querystring=True,
        status=200,
    )
    responses.add(
        responses.GET,
        RUNNABLE_JOBS_URL,
        json=RUNNABLE_JOBS_CONTENTS,
        match_querystring=True,
        status=200,
    )
    jobs_ret = _taskcluster_runnable_jobs(repo)

    assert len(jobs_ret) == 1
@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from treeherder.etl.text import (astral_filter,
                                 filter_re)
from treeherder.etl.text import astral_filter, filter_re


def test_filter_re_matching():
@ -13,8 +13,9 @@ def test_intermittents_commenter(bug_data):

    process = Commenter(weekly_mode=True, dry_run=True)
    params = {'include_fields': 'product%2C+component%2C+priority%2C+whiteboard%2C+id'}
    url = '{}/rest/bug?id={}&include_fields={}'.format(settings.BZ_API_URL, bug_data['bug_id'],
                                                       params['include_fields'])
    url = '{}/rest/bug?id={}&include_fields={}'.format(
        settings.BZ_API_URL, bug_data['bug_id'], params['include_fields']
    )

    content = {
        "bugs": [

@ -23,18 +24,15 @@ def test_intermittents_commenter(bug_data):
                u"priority": u"P3",
                u"product": u"Testing",
                u"whiteboard": u"[stockwell infra] [see summary at comment 92]",
                u"id": bug_data['bug_id']
                u"id": bug_data['bug_id'],
            }
        ],
        "faults": []
        "faults": [],
    }

    responses.add(responses.Response(
        method='GET',
        url=url,
        json=content,
        match_querystring=True,
        status=200))
    responses.add(
        responses.Response(method='GET', url=url, json=content, match_querystring=True, status=200)
    )

    resp = process.fetch_bug_details(bug_data['bug_id'])
    assert resp == content['bugs']
@ -2,37 +2,31 @@ import pytest
import responses

from tests.test_utils import add_log_response
from treeherder.log_parser.artifactbuildercollection import (MAX_DOWNLOAD_SIZE_IN_BYTES,
                                                             ArtifactBuilderCollection,
                                                             LogSizeException)
from treeherder.log_parser.artifactbuildercollection import (
    MAX_DOWNLOAD_SIZE_IN_BYTES,
    ArtifactBuilderCollection,
    LogSizeException,
)
from treeherder.log_parser.artifactbuilders import BuildbotLogViewArtifactBuilder


def test_builders_as_list():
    """test that passing in a list of builders works"""
    builder = BuildbotLogViewArtifactBuilder()
    lpc = ArtifactBuilderCollection(
        "foo-url",
        builders=[builder]
    )
    lpc = ArtifactBuilderCollection("foo-url", builders=[builder])
    assert lpc.builders == [builder]


def test_builders_as_single_still_list():
    """test that passing in a single builder becomes a list"""
    builder = BuildbotLogViewArtifactBuilder()
    lpc = ArtifactBuilderCollection(
        "foo-url",
        builders=builder
    )
    lpc = ArtifactBuilderCollection("foo-url", builders=builder)
    assert lpc.builders == [builder]


def test_default_builders():
    """test no builders"""
    lpc = ArtifactBuilderCollection(
        "foo-url",
    )
    lpc = ArtifactBuilderCollection("foo-url",)
    assert isinstance(lpc.builders, list)
    assert len(lpc.builders) == 3

@ -49,17 +43,8 @@ def test_all_builders_complete():

    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
            "logurl": url,
        },
        "Job Info": {
            "job_details": [],
            "logurl": url,
        }
        "text_log_summary": {"step_data": {"steps": [], "errors_truncated": False}, "logurl": url,},
        "Job Info": {"job_details": [], "logurl": url,},
    }

    assert exp == lpc.artifacts

@ -76,7 +61,7 @@ def test_log_download_size_limit():
        adding_headers={
            'Content-Encoding': 'gzip',
            'Content-Length': str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
        }
        },
    )
    lpc = ArtifactBuilderCollection(url)
@ -1,20 +1,18 @@
from django.core.management import call_command

from treeherder.model.models import (FailureLine,
                                     TextLogError)
from treeherder.model.models import FailureLine, TextLogError

from ..autoclassify.utils import (create_failure_lines,
                                  create_text_log_errors,
                                  group_line,
                                  test_line)
from ..autoclassify.utils import create_failure_lines, create_text_log_errors, group_line, test_line


def test_crossreference_error_lines(test_job):
    lines = [(test_line, {}),
             (test_line, {"subtest": "subtest2"}),
             (test_line, {"status": "TIMEOUT"}),
             (test_line, {"expected": "ERROR"}),
             (test_line, {"message": "message2"})]
    lines = [
        (test_line, {}),
        (test_line, {"subtest": "subtest2"}),
        (test_line, {"status": "TIMEOUT"}),
        (test_line, {"expected": "ERROR"}),
        (test_line, {"message": "message2"}),
    ]

    create_failure_lines(test_job, lines)
    create_text_log_errors(test_job, lines)

@ -33,35 +31,36 @@ def test_crossreference_error_lines(test_job):


def test_crossreference_error_lines_truncated(test_job):
    lines = [(test_line, {}),
             (test_line, {"subtest": "subtest2"}),
             (test_line, {"status": "TIMEOUT"}),
             (test_line, {"expected": "ERROR"}),
             (test_line, {"message": "message2"}),
             ]
    lines = [
        (test_line, {}),
        (test_line, {"subtest": "subtest2"}),
        (test_line, {"status": "TIMEOUT"}),
        (test_line, {"expected": "ERROR"}),
        (test_line, {"message": "message2"}),
    ]

    create_text_log_errors(test_job, lines)
    create_failure_lines(test_job,
                         lines[:-1] + [({"action": "truncated"}, {})])
    create_failure_lines(test_job, lines[:-1] + [({"action": "truncated"}, {})])

    call_command('crossreference_error_lines', str(test_job.id))

    error_lines = TextLogError.objects.filter(step__job=test_job).all()
    failure_lines = FailureLine.objects.all()

    for failure_line, error_line in zip(failure_lines[:len(failure_lines)-1], error_lines):
    for failure_line, error_line in zip(failure_lines[: len(failure_lines) - 1], error_lines):
        assert error_line.metadata.failure_line == failure_line
        assert error_line.metadata.best_is_verified is False
        assert error_line.metadata.best_classification is None


def test_crossreference_error_lines_missing(test_job):
    lines = [(test_line, {}),
             (test_line, {"subtest": "subtest2"}),
             (test_line, {"status": "TIMEOUT"}),
             (test_line, {"expected": "ERROR"}),
             (test_line, {"message": "message2"}),
             ]
    lines = [
        (test_line, {}),
        (test_line, {"subtest": "subtest2"}),
        (test_line, {"status": "TIMEOUT"}),
        (test_line, {"expected": "ERROR"}),
        (test_line, {"message": "message2"}),
    ]

    create_failure_lines(test_job, lines[1:])
    create_text_log_errors(test_job, lines)

@ -78,12 +77,14 @@ def test_crossreference_error_lines_missing(test_job):


def test_crossreference_error_lines_leading_groups(test_job):
    lines = [(group_line, {}),
             (test_line, {}),
             (test_line, {"subtest": "subtest2"}),
             (test_line, {"status": "TIMEOUT"}),
             (test_line, {"expected": "ERROR"}),
             (test_line, {"message": "message2"})]
    lines = [
        (group_line, {}),
        (test_line, {}),
        (test_line, {"subtest": "subtest2"}),
        (test_line, {"status": "TIMEOUT"}),
        (test_line, {"expected": "ERROR"}),
        (test_line, {"message": "message2"}),
    ]

    create_failure_lines(test_job, lines)
    create_text_log_errors(test_job, lines)
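[Editor's note, not part of the commit: the rewritten slice failure_lines[: len(failure_lines) - 1] above is black's PEP 8 treatment of slices whose bounds are complex expressions, and it is exactly what flake8 flags as E203 (whitespace before ':') by default. This is the conflict the commit message resolves by adding rule exceptions; black's documentation recommends ignoring E203 and W503. A small sketch of the equivalence:]

    items = list(range(10))
    # black formats the bound with a space after '['s colon; flake8's E203 must
    # be disabled for this to pass linting
    trimmed = items[: len(items) - 1]
    assert trimmed == items[:-1]  # both drop the last element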
@ -34,7 +34,7 @@ NON_ERROR_TEST_CASES = (
    "[taskcluster:info] Starting task",
    "[taskcluster] Starting task",
    "01:22:41 INFO - ImportError: No module named pygtk",
    "01:22:41 INFO - ImportError: No module named pygtk\r\n"
    "01:22:41 INFO - ImportError: No module named pygtk\r\n",
)
@ -39,204 +39,152 @@ def do_test(log):
def test_crashtest_passing():
    """Process a job with a single log reference."""

    do_test(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50"
    )
    do_test("mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50")


def test_mochitest_pass():
    """Process a job with a single log reference."""

    do_test(
        "mozilla-central_mountainlion_test-mochitest-2-bm77-tests1-macosx-build141"
    )
    do_test("mozilla-central_mountainlion_test-mochitest-2-bm77-tests1-macosx-build141")


def test_duration_gt_1hr():
    do_test(
        "mozilla-central-win32-pgo-bm85-build1-build111"
    )
    do_test("mozilla-central-win32-pgo-bm85-build1-build111")


@slow
def test_mochitest_fail():
    """Process a job with a single log reference."""

    do_test(
        "mozilla-esr17_xp_test_pgo-mochitest-browser-chrome-bm74-tests1-windows-build12"
    )
    do_test("mozilla-esr17_xp_test_pgo-mochitest-browser-chrome-bm74-tests1-windows-build12")


def test_mochitest_process_crash():
    """Test a mochitest log that has PROCESS-CRASH """

    do_test(
        "mozilla-inbound_ubuntu64_vm-debug_test-mochitest-other-bm53-tests1-linux-build122"
    )
    do_test("mozilla-inbound_ubuntu64_vm-debug_test-mochitest-other-bm53-tests1-linux-build122")


@slow
def test_jetpack_fail():
    """Process a job with a single log reference."""

    do_test(
        "ux_ubuntu32_vm_test-jetpack-bm67-tests1-linux-build16"
    )
    do_test("ux_ubuntu32_vm_test-jetpack-bm67-tests1-linux-build16")


@slow
def test_crash_1():
    """Test from old log parser"""
    do_test(
        "crash-1"
    )
    do_test("crash-1")


@slow
def test_crash_2():
    """Test from old log parser"""
    do_test(
        "crash-2"
    )
    do_test("crash-2")


@slow
def test_crash_mac_1():
    """Test from old log parser"""
    do_test(
        "crash-mac-1"
    )
    do_test("crash-mac-1")


@slow
def test_crashtest_timeout():
    """Test from old log parser"""
    do_test(
        "crashtest-timeout"
    )
    do_test("crashtest-timeout")


@slow
def test_jsreftest_fail():
    """Test from old log parser"""
    do_test(
        "jsreftest-fail"
    )
    do_test("jsreftest-fail")


@slow
def test_jsreftest_timeout_crash():
    """Test from old log parser"""
    do_test(
        "jsreftest-timeout-crash"
    )
    do_test("jsreftest-timeout-crash")


@slow
def test_leaks_1():
    """Test from old log parser"""
    do_test(
        "leaks-1"
    )
    do_test("leaks-1")


@slow
def test_mochitest_test_end():
    """Test from old log parser"""
    do_test(
        "mochitest-test-end"
    )
    do_test("mochitest-test-end")


@slow
def test_multiple_timeouts():
    """Test from old log parser"""
    do_test(
        "multiple-timeouts"
    )
    do_test("multiple-timeouts")


@slow
def test_opt_objc_exception():
    """Test from old log parser"""
    do_test(
        "opt-objc-exception"
    )
    do_test("opt-objc-exception")


@slow
def test_reftest_fail_crash():
    """Test from old log parser"""
    do_test(
        "reftest-fail-crash"
    )
    do_test("reftest-fail-crash")


@slow
def test_reftest_jserror():
    """Test from old log parser"""
    do_test(
        "reftest-jserror"
    )
    do_test("reftest-jserror")


@slow
def test_reftest_opt_fail():
    """Test from old log parser"""
    do_test(
        "reftest-opt-fail"
    )
    do_test("reftest-opt-fail")


@slow
def test_reftest_timeout():
    """Test from old log parser"""
    do_test(
        "reftest-timeout"
    )
    do_test("reftest-timeout")


@slow
def test_tinderbox_exception():
    """Test from old log parser"""
    do_test(
        "tinderbox-exception"
    )
    do_test("tinderbox-exception")


def test_xpcshell_crash():
    """Test from old log parser"""
    do_test(
        "xpcshell-crash"
    )
    do_test("xpcshell-crash")


def test_xpcshell_multiple():
    """Test from old log parser"""
    do_test(
        "xpcshell-multiple"
    )
    do_test("xpcshell-multiple")


def test_xpcshell_timeout():
    """Test from old log parser"""
    do_test(
        "xpcshell-timeout"
    )
    do_test("xpcshell-timeout")


def test_extreme_log_line_length_truncation():
    """This log has lines that are huge. Ensure we truncate the lines to 100"""
    do_test(
        "mozilla-central_ubuntu64_hw_test-androidx86-set-4-bm103-tests1-linux-build369"
    )
    do_test("mozilla-central_ubuntu64_hw_test-androidx86-set-4-bm103-tests1-linux-build369")


def test_too_many_error_lines_truncation():
    """This log has a large number of lines that match the error regex. Ensure we truncate to 100 lines."""
    do_test(
        "large-number-of-error-lines"
    )
    do_test("large-number-of-error-lines")


def test_taskcluster_missing_finish_marker():

@ -246,6 +194,4 @@ def test_taskcluster_missing_finish_marker():
    between the step markers that should result in unnamed steps being created
    to house any errors within them.
    """
    do_test(
        "taskcluster-missing-finish-step-marker"
    )
    do_test("taskcluster-missing-finish-step-marker")
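[Editor's note, not part of the commit: every do_test() hunk above is the same mechanical rewrite. Once the single argument fits within the line limit, black joins a vertically split call back onto one line, which is why this file shrinks from 204 to 152 lines per the hunk header:]

    # before
    do_test(
        "crash-1"
    )
    # after
    do_test("crash-1")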
@ -15,9 +15,10 @@ def test_performance_log_parsing():

    # first two have only one artifact, second has two artifacts
    for (logfile, num_perf_artifacts) in [
            ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
            ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
            ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2)]:
        ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
        ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
        ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2),
    ]:
        url = add_log_response(logfile)

        builder = BuildbotPerformanceDataArtifactBuilder(url=url)
@ -1,7 +1,6 @@
import json

from treeherder.log_parser.parsers import (EmptyPerformanceData,
                                           PerformanceParser)
from treeherder.log_parser.parsers import EmptyPerformanceData, PerformanceParser


def test_performance_log_parsing_malformed_perfherder_data():

@ -20,15 +19,14 @@ def test_performance_log_parsing_malformed_perfherder_data():
        pass

    valid_perfherder_data = {
        "framework": {"name": "talos"}, "suites": [{
            "name": "basic_compositor_video",
            "subtests": [{
                "name": "240p.120fps.mp4_scale_fullscreen_startup",
                "value": 1234
            }]
        }]
        "framework": {"name": "talos"},
        "suites": [
            {
                "name": "basic_compositor_video",
                "subtests": [{"name": "240p.120fps.mp4_scale_fullscreen_startup", "value": 1234}],
            }
        ],
    }
    parser.parse_line('PERFHERDER_DATA: {}'.format(
        json.dumps(valid_perfherder_data)), 3)
    parser.parse_line('PERFHERDER_DATA: {}'.format(json.dumps(valid_perfherder_data)), 3)

    assert parser.get_artifact() == [valid_perfherder_data]
@ -5,11 +5,8 @@ import responses
from django.conf import settings
from requests.exceptions import HTTPError

from treeherder.log_parser.failureline import (store_failure_lines,
                                               write_failure_lines)
from treeherder.model.models import (FailureLine,
                                     Group,
                                     JobLog)
from treeherder.log_parser.failureline import store_failure_lines, write_failure_lines
from treeherder.model.models import FailureLine, Group, JobLog

from ..sampledata import SampleData

@ -19,8 +16,7 @@ def test_store_error_summary(activate_responses, test_repository, test_job):
    log_url = 'http://my-log.mozilla.org'

    with open(log_path) as log_handler:
        responses.add(responses.GET, log_url,
                      body=log_handler.read(), status=200)
        responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -45,8 +41,7 @@ def test_store_error_summary_default_group(activate_responses, test_repository,
        resp_body = json.load(log_handler)

    resp_body["group"] = "default"
    responses.add(responses.GET, log_url,
                  body=json.dumps(resp_body), status=200)
    responses.add(responses.GET, log_url, body=json.dumps(resp_body), status=200)

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -59,16 +54,14 @@ def test_store_error_summary_default_group(activate_responses, test_repository,
    assert failure.group.all().first().name == "default"


def test_store_error_summary_truncated(activate_responses, test_repository,
                                       test_job, monkeypatch):
def test_store_error_summary_truncated(activate_responses, test_repository, test_job, monkeypatch):
    log_path = SampleData().get_log_path("plain-chunked_errorsummary_10_lines.log")
    log_url = 'http://my-log.mozilla.org'

    monkeypatch.setattr(settings, 'FAILURE_LINES_CUTOFF', 5)

    with open(log_path) as log_handler:
        responses.add(responses.GET, log_url,
                      body=log_handler.read(), status=200)
        responses.add(responses.GET, log_url, body=log_handler.read(), status=200)

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -93,8 +86,13 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo
    log_url = 'http://my-log.mozilla.org'

    with open(log_path, encoding='utf8') as log_handler:
        responses.add(responses.GET, log_url, content_type="text/plain;charset=utf-8",
                      body=log_handler.read(), status=200)
        responses.add(
            responses.GET,
            log_url,
            content_type="text/plain;charset=utf-8",
            body=log_handler.read(),
            status=200,
        )

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -110,7 +108,10 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo

    assert failure.repository == test_repository

    assert failure.test == u"toolkit/content/tests/widgets/test_videocontrols_video_direction.html <U+01F346>"
    assert (
        failure.test
        == u"toolkit/content/tests/widgets/test_videocontrols_video_direction.html <U+01F346>"
    )
    assert failure.subtest == u"Test timed out. <U+010081>"
    assert failure.message == u"<U+0F0151>"
    assert failure.stack.endswith("<U+0F0151>")

@ -123,8 +124,7 @@ def test_store_error_summary_404(activate_responses, test_repository, test_job):
    log_url = 'http://my-log.mozilla.org'

    with open(log_path) as log_handler:
        responses.add(responses.GET, log_url,
                      body=log_handler.read(), status=404)
        responses.add(responses.GET, log_url, body=log_handler.read(), status=404)

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -139,8 +139,7 @@ def test_store_error_summary_500(activate_responses, test_repository, test_job):
    log_url = 'http://my-log.mozilla.org'

    with open(log_path) as log_handler:
        responses.add(responses.GET, log_url,
                      body=log_handler.read(), status=500)
        responses.add(responses.GET, log_url, body=log_handler.read(), status=500)

    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

@ -155,17 +154,15 @@ def test_store_error_summary_duplicate(activate_responses, test_repository, test
    log_url = 'http://my-log.mozilla.org'
    log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)

    write_failure_lines(log_obj, [{"action": "log",
                                   "level": "debug",
                                   "message": "test",
                                   "line": 1}])
    write_failure_lines(log_obj, [{"action": "log",
                                   "level": "debug",
                                   "message": "test",
                                   "line": 1},
                                  {"action": "log",
                                   "level": "debug",
                                   "message": "test 1",
                                   "line": 2}])
    write_failure_lines(
        log_obj, [{"action": "log", "level": "debug", "message": "test", "line": 1}]
    )
    write_failure_lines(
        log_obj,
        [
            {"action": "log", "level": "debug", "message": "test", "line": 1},
            {"action": "log", "level": "debug", "message": "test 1", "line": 2},
        ],
    )

    assert FailureLine.objects.count() == 2
@ -4,9 +4,7 @@ from tests.test_utils import add_log_response
from treeherder.etl.jobs import store_job_data
from treeherder.etl.push import store_push_data
from treeherder.model.error_summary import get_error_summary
from treeherder.model.models import (Job,
                                     JobDetail,
                                     TextLogError)
from treeherder.model.models import Job, JobDetail, TextLogError

from ..sampledata import SampleData

@ -45,9 +43,9 @@ def test_parse_log(test_repository, failure_classifications, jobs_with_local_log
    print(JobDetail.objects.count() == 4)


def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_push,
                              test_repository):
def test_create_error_summary(
    failure_classifications, jobs_with_local_log, sample_push, test_repository
):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
@ -14,49 +14,52 @@ TINDERBOX_TEST_CASES = (
            'libxul_link:2918047744'
            '</a>'
        ),
        [{
            'content_type': 'link',
            'title': None,
            'url': 'http://graphs.mozilla.org/graph.html#tests=[[205,63,8]]',
            'value': 'libxul_link:2918047744'
        }]
        [
            {
                'content_type': 'link',
                'title': None,
                'url': 'http://graphs.mozilla.org/graph.html#tests=[[205,63,8]]',
                'value': 'libxul_link:2918047744',
            }
        ],
    ),

    (
        (
            'TinderboxPrint: sources.xml: http://stage.mozilla.org/pub/mozilla.org/'
            'b2g/manifests/depend/mozilla-central/hamachi-eng/20140718040333/'
            'sources-5d0aad07bd13e04de0cd7befc0e2b83a.xml'
        ),
        [{
            'content_type': 'link',
            'title': 'sources.xml',
            'url': (
                'http://stage.mozilla.org/pub/mozilla.org/'
                'b2g/manifests/depend/mozilla-central/hamachi-eng/20140718040333/'
                'sources-5d0aad07bd13e04de0cd7befc0e2b83a.xml'
            ),
            'value': (
                'http://stage.mozilla.org/pub/mozilla.org/'
                'b2g/manifests/depend/mozilla-central/hamachi-eng/20140718040333/'
                'sources-5d0aad07bd13e04de0cd7befc0e2b83a.xml'
            )
        }]
        [
            {
                'content_type': 'link',
                'title': 'sources.xml',
                'url': (
                    'http://stage.mozilla.org/pub/mozilla.org/'
                    'b2g/manifests/depend/mozilla-central/hamachi-eng/20140718040333/'
                    'sources-5d0aad07bd13e04de0cd7befc0e2b83a.xml'
                ),
                'value': (
                    'http://stage.mozilla.org/pub/mozilla.org/'
                    'b2g/manifests/depend/mozilla-central/hamachi-eng/20140718040333/'
                    'sources-5d0aad07bd13e04de0cd7befc0e2b83a.xml'
                ),
            }
        ],
    ),

    (
        (
            'TinderboxPrint: mozharness_revlink: '
            'https://hg.mozilla.org/build/mozharness/rev/16ba958057a8'
        ),
        [{
            'content_type': 'link',
            'title': 'mozharness_revlink',
            'url': 'https://hg.mozilla.org/build/mozharness/rev/16ba958057a8',
            'value': 'https://hg.mozilla.org/build/mozharness/rev/16ba958057a8'
        }]
        [
            {
                'content_type': 'link',
                'title': 'mozharness_revlink',
                'url': 'https://hg.mozilla.org/build/mozharness/rev/16ba958057a8',
                'value': 'https://hg.mozilla.org/build/mozharness/rev/16ba958057a8',
            }
        ],
    ),

    (
        (
            'TinderboxPrint: '

@ -65,17 +68,18 @@ TINDERBOX_TEST_CASES = (
            'wpt_structured_full.log'
            '</a>: uploaded'
        ),
        [{
            'content_type': 'link',
            'title': 'artifact uploaded',
            'url': (
                'http://mozilla-releng-blobs.s3.amazonaws.com'
                '/blobs/cedar/sha512/9123cb277dbf1eb6d90'
            ),
            'value': 'wpt_structured_full.log'
        }]
        [
            {
                'content_type': 'link',
                'title': 'artifact uploaded',
                'url': (
                    'http://mozilla-releng-blobs.s3.amazonaws.com'
                    '/blobs/cedar/sha512/9123cb277dbf1eb6d90'
                ),
                'value': 'wpt_structured_full.log',
            }
        ],
    ),

    (
        (
            'TinderboxPrint: '

@ -84,44 +88,47 @@ TINDERBOX_TEST_CASES = (
            'libxul_link:2918047744'
            '</a>'
        ),
        [{
            'content_type': 'link',
            'title': None,
            'url': 'http://graphs.mozilla.org/graph.html#tests=[[205,63,8]]',
            'value': 'libxul_link:2918047744'
        }]
        [
            {
                'content_type': 'link',
                'title': None,
                'url': 'http://graphs.mozilla.org/graph.html#tests=[[205,63,8]]',
                'value': 'libxul_link:2918047744',
            }
        ],
    ),

    (
        (
            'TinderboxPrint: '
            'xpcshell-xpcshell<br/>2153/<em class="testfail">1</em> '
            '<em class="testfail">CRASH</em>'
        ),
        [{
            'content_type': 'raw_html',
            'title': 'xpcshell-xpcshell',
            'value': (
                '2153/<em class="testfail">1</em> '
                '<em class="testfail">CRASH</em>'
            )
        }]
        [
            {
                'content_type': 'raw_html',
                'title': 'xpcshell-xpcshell',
                'value': (
                    '2153/<em class="testfail">1</em> ' '<em class="testfail">CRASH</em>'
                ),
            }
        ],
    ),

    (
        (
            'TinderboxPrint: hazard results: '
            'https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/'
            'mozilla-central-linux64-br-haz/20150226025813'
        ),
        [{
            'content_type': 'link',
            'title': 'hazard results',
            'url': 'https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/'
                   'mozilla-central-linux64-br-haz/20150226025813',
            'value': 'https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/'
                     'mozilla-central-linux64-br-haz/20150226025813'
        }]
        [
            {
                'content_type': 'link',
                'title': 'hazard results',
                'url': 'https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/'
                'mozilla-central-linux64-br-haz/20150226025813',
                'value': 'https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/'
                'mozilla-central-linux64-br-haz/20150226025813',
            }
        ],
    ),
)

@ -6,66 +6,32 @@ import simplejson as json
from django.db.utils import DataError
from jsonschema import ValidationError

from treeherder.log_parser.utils import (MAX_LENGTH,
                                         SECOND_MAX_LENGTH,
                                         _lookup_extra_options_max,
                                         validate_perf_data)
from treeherder.log_parser.utils import (
    MAX_LENGTH,
    SECOND_MAX_LENGTH,
    _lookup_extra_options_max,
    validate_perf_data,
)

LENGTH_OK = {
    'framework': {},
    'suites': [
        {
            'extraOptions': [
                '.' * 45,
                '.' * 100,
            ],
            'name': 'testing',
            'subtests': []
        }
    ] * 3
}
    'framework': {},
    'suites': [{'extraOptions': ['.' * 45, '.' * 100,], 'name': 'testing', 'subtests': []}] * 3,
}

LONGER_THAN_ALL_MAX = {
    'framework': {},
    'suites': [
        {
            'extraOptions': [
                '.' * 46,
                '.' * 101,
            ],
            'name': 'testing',
            'subtests': []
        }
    ]
}
    'framework': {},
    'suites': [{'extraOptions': ['.' * 46, '.' * 101,], 'name': 'testing', 'subtests': []}],
}

LONGER_THAN_BIGGER_MAX = {
    'framework': {},
    'suites': [
        {
            'extraOptions': [
                '.' * 45,
                '.' * 101,
            ],
            'name': 'testing',
            'subtests': []
        }
    ]
}
    'framework': {},
    'suites': [{'extraOptions': ['.' * 45, '.' * 101,], 'name': 'testing', 'subtests': []}],
}

LONGER_THAN_SMALLER_MAX = {
    'framework': {},
    'suites': [
        {
            'extraOptions': [
                '.' * 46,
                '.' * 100,
            ],
            'name': 'testing',
            'subtests': []
        }
    ] * 3
}
    'framework': {},
    'suites': [{'extraOptions': ['.' * 46, '.' * 100,], 'name': 'testing', 'subtests': []}] * 3,
}


def test_smaller_than_bigger():

@ -85,7 +51,9 @@ def test_validate_perf_schema_no_exception():
        pytest.fail(str(exc))


@pytest.mark.parametrize('data', (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX))
@pytest.mark.parametrize(
    'data', (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX)
)
def test_validate_perf_schema(data):
    for datum in data:
        with pytest.raises(ValidationError):

@ -2,8 +2,7 @@ import json

from django.utils.timezone import now as django_now

from treeherder.perf.models import (BackfillRecord,
                                    BackfillReport)
from treeherder.perf.models import BackfillRecord, BackfillReport


class TestBackfillReportClass:

@ -18,8 +17,9 @@ class TestBackfillReportClass:

        assert backfill_record.is_outdated is True

    def test_last_updated_is_synced_with_child_records(self, test_perf_alert,
                                                       backfill_record_context):
    def test_last_updated_is_synced_with_child_records(
        self, test_perf_alert, backfill_record_context
    ):
        test_summary = test_perf_alert.summary
        context_dump = json.dumps(backfill_record_context)

@ -27,9 +27,9 @@ class TestBackfillReportClass:
        last_updated_before_new_record = backfill_report.last_updated

        # this should re update the report
        BackfillRecord.objects.create(alert=test_perf_alert,
                                      report=backfill_report,
                                      context=context_dump)
        BackfillRecord.objects.create(
            alert=test_perf_alert, report=backfill_report, context=context_dump
        )
        assert last_updated_before_new_record < backfill_report.last_updated

        # record bulk deletes count as report updates too

@ -39,9 +39,9 @@ class TestBackfillReportClass:
        assert last_updated_before_expelling_records < backfill_report.last_updated

        # deleting single record counts are report update too
        new_backfill_record = BackfillRecord.objects.create(alert=test_perf_alert,
                                                            report=backfill_report,
                                                            context=context_dump)
        new_backfill_record = BackfillRecord.objects.create(
            alert=test_perf_alert, report=backfill_report, context=context_dump
        )
        last_updated_before_single_record_delete = backfill_report.last_updated

        new_backfill_record.delete()

@ -1,7 +1,6 @@
import json
import os
from datetime import (datetime,
                      timedelta)
from datetime import datetime, timedelta

import pytest
from django.db import connection

@ -12,11 +11,7 @@ from treeherder.model.models import Bugscache

@pytest.fixture
def sample_bugs(test_base_dir):
    filename = os.path.join(
        test_base_dir,
        'sample_data',
        'bug_list.json'
    )
    filename = os.path.join(test_base_dir, 'sample_data', 'bug_list.json')
    with open(filename) as f:
        return json.load(f)

@ -40,38 +35,23 @@ def _update_bugscache(bug_list):


BUG_SEARCHES = (
    (
        "test_popup_preventdefault_chrome.xul",
        [455091]
    ),
    (
        "test_popup_preventdefault_chrome.xul foo bar",
        []
    ),
    ("test_popup_preventdefault_chrome.xul", [455091]),
    ("test_popup_preventdefault_chrome.xul foo bar", []),
    (
        "test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted",
        [1054669, 1078237]
        [1054669, 1078237],
    ),
    (
        "command timed out: 3600 seconds without output running ['/tools/buildbot/bin/python', 'scripts/scrip",
        [1054456]
        [1054456],
    ),
    (
        "[taskcluster:error] Command \" [./test-macosx.sh --no-read-buildbot-config --installer-url=https://q",
        [100]
    ),
    (
        "should not be match_d",
        []
    ),
    (
        "should not be match%d",
        []
    ),
    (
        "should not be matche=d",
        []
        [100],
    ),
    ("should not be match_d", []),
    ("should not be match%d", []),
    ("should not be matche=d", []),
)


@ -138,8 +118,9 @@ def test_bug_properties(transactional_db, sample_bugs):
        bug['last_change_time'] = fifty_days_ago
    _update_bugscache(bug_list)

    expected_keys = set(['crash_signature', 'resolution', 'summary', 'keywords', 'os', 'id',
                         'status', 'whiteboard'])
    expected_keys = set(
        ['crash_signature', 'resolution', 'summary', 'keywords', 'os', 'id', 'status', 'whiteboard']
    )

    suggestions = Bugscache.search(search_term)
    assert set(suggestions['open_recent'][0].keys()) == expected_keys

@ -2,14 +2,15 @@ from decimal import Decimal

from django.contrib.auth.models import User

from tests.autoclassify.utils import (create_lines,
                                      test_line)
from tests.autoclassify.utils import create_lines, test_line
from treeherder.autoclassify.autoclassify import mark_best_classification
from treeherder.model.models import (BugJobMap,
                                     ClassifiedFailure,
                                     FailureLine,
                                     TextLogErrorMatch,
                                     TextLogErrorMetadata)
from treeherder.model.models import (
    BugJobMap,
    ClassifiedFailure,
    FailureLine,
    TextLogErrorMatch,
    TextLogErrorMetadata,
)


def test_set_bug(classified_failures):

@ -75,9 +76,7 @@ def test_update_autoclassification_bug(test_job, test_job_2, classified_failures

    # Create a BugJobMap
    BugJobMap.create(
        job_id=test_job.id,
        bug_id=1234,
        user=user,
        job_id=test_job.id, bug_id=1234, user=user,
    )
    mark_best_classification(text_log_errors[0], classified_failure)
    assert classified_failure.bug_number is None

@ -87,9 +86,7 @@ def test_update_autoclassification_bug(test_job, test_job_2, classified_failures
    metadata.save()

    BugJobMap.create(
        job_id=test_job_2.id,
        bug_id=1234,
        user=user,
        job_id=test_job_2.id, bug_id=1234, user=user,
    )
    classified_failure.refresh_from_db()
    assert classified_failure.bug_number == 1234

@ -5,26 +5,33 @@ from django.core.management import call_command
from django.db.models import Max

from tests import test_utils
from tests.autoclassify.utils import (create_failure_lines,
                                      test_line)
from treeherder.model.management.commands.cycle_data import (MINIMUM_PERFHERDER_EXPIRE_INTERVAL,
                                                             PerfherderCycler)
from treeherder.model.models import (FailureLine,
                                     Job,
                                     JobDetail,
                                     JobGroup,
                                     JobLog,
                                     JobType,
                                     Machine,
                                     Push)
from tests.autoclassify.utils import create_failure_lines, test_line
from treeherder.model.management.commands.cycle_data import (
    MINIMUM_PERFHERDER_EXPIRE_INTERVAL,
    PerfherderCycler,
)
from treeherder.model.models import (
    FailureLine,
    Job,
    JobDetail,
    JobGroup,
    JobLog,
    JobType,
    Machine,
    Push,
)
from treeherder.perf.exceptions import MaxRuntimeExceeded
from treeherder.perf.models import (PerformanceDatum,
                                    PerformanceDatumManager,
                                    PerformanceSignature)
from treeherder.perf.models import PerformanceDatum, PerformanceDatumManager, PerformanceSignature


def test_cycle_all_data(test_repository, failure_classifications, sample_data,
                        sample_push, mock_log_parser, failure_lines):
def test_cycle_all_data(
    test_repository,
    failure_classifications,
    sample_data,
    sample_push,
    mock_log_parser,
    failure_lines,
):
    """
    Test cycling the sample data
    """

@ -46,8 +53,14 @@ def test_cycle_all_data(test_repository, failure_classifications, sample_data,
    assert JobLog.objects.count() == 0


def test_cycle_all_but_one_job(test_repository, failure_classifications, sample_data,
                               sample_push, mock_log_parser, failure_lines):
def test_cycle_all_but_one_job(
    test_repository,
    failure_classifications,
    sample_data,
    sample_push,
    mock_log_parser,
    failure_lines,
):
    """
    Test cycling all but one job in a group of jobs to confirm there are no
    unexpected deletions

@ -62,15 +75,16 @@ def test_cycle_all_but_one_job(test_repository, failure_classifications, sample_
    job_not_deleted.save()

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(
                              job_not_deleted,
                              [(test_line, {}),
                               (test_line, {"subtest": "subtest2"})])),
        'job_details': (JobDetail, [JobDetail.objects.create(
            job=job_not_deleted,
            title='test',
            value='testvalue')])
        'failure_lines': (
            FailureLine,
            create_failure_lines(
                job_not_deleted, [(test_line, {}), (test_line, {"subtest": "subtest2"})]
            ),
        ),
        'job_details': (
            JobDetail,
            [JobDetail.objects.create(job=job_not_deleted, title='test', value='testvalue')],
        ),
    }

    # set other job's submit time to be a week ago from now

@ -78,24 +92,23 @@ def test_cycle_all_but_one_job(test_repository, failure_classifications, sample_
    for job in Job.objects.all().exclude(id=job_not_deleted.id):
        job.submit_time = cycle_date_ts
        job.save()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(
        job__id=job_not_deleted.id).count()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(job__id=job_not_deleted.id).count()
    num_job_logs_before = JobLog.objects.count()

    call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, debug=True, chunk_size=1)

    assert Job.objects.count() == 1
    assert JobLog.objects.count() == (num_job_logs_before -
                                      num_job_logs_to_be_deleted)
    assert JobLog.objects.count() == (num_job_logs_before - num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        actual = set(item.id for item in object_type.objects.all())
        expected = set(item.id for item in objects)
        assert (actual == expected)
        assert actual == expected


def test_cycle_all_data_in_chunks(test_repository, failure_classifications, sample_data,
                                  sample_push, mock_log_parser):
def test_cycle_all_data_in_chunks(
    test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
    """
    Test cycling the sample data in chunks.
    """

@ -108,8 +121,7 @@ def test_cycle_all_data_in_chunks(test_repository, failure_classifications, samp
        job.submit_time = cycle_date_ts
        job.save()

    create_failure_lines(Job.objects.get(id=1),
                         [(test_line, {})] * 7)
    create_failure_lines(Job.objects.get(id=1), [(test_line, {})] * 7)

    call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)

@ -119,9 +131,9 @@ def test_cycle_all_data_in_chunks(test_repository, failure_classifications, samp
    assert JobDetail.objects.count() == 0


def test_cycle_job_model_reference_data(test_repository, failure_classifications,
                                        sample_data, sample_push,
                                        mock_log_parser):
def test_cycle_job_model_reference_data(
    test_repository, failure_classifications, sample_data, sample_push, mock_log_parser
):
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push, False)

@ -144,14 +156,18 @@ def test_cycle_job_model_reference_data(test_repository, failure_classifications
    assert Machine.objects.filter(id=m_id).count() == 0

    # assert that we still have everything that shouldn't have been cycled
    assert JobType.objects.filter(id__in=original_job_type_ids).count() == len(original_job_type_ids)
    assert JobGroup.objects.filter(id__in=original_job_group_ids).count() == len(original_job_group_ids)
    assert JobType.objects.filter(id__in=original_job_type_ids).count() == len(
        original_job_type_ids
    )
    assert JobGroup.objects.filter(id__in=original_job_group_ids).count() == len(
        original_job_group_ids
    )
    assert Machine.objects.filter(id__in=original_machine_ids).count() == len(original_machine_ids)


def test_cycle_job_with_performance_data(test_repository, failure_classifications,
                                         test_job, mock_log_parser,
                                         test_perf_signature):
def test_cycle_job_with_performance_data(
    test_repository, failure_classifications, test_job, mock_log_parser, test_perf_signature
):
    # build a date that will cause the data to be cycled
    test_job.submit_time = datetime.datetime.now() - datetime.timedelta(weeks=1)
    test_job.save()

@ -163,7 +179,8 @@ def test_cycle_job_with_performance_data(test_repository, failure_classification
        job=test_job,
        signature=test_perf_signature,
        push_timestamp=test_job.push.time,
        value=1.0)
        value=1.0,
    )

    call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)

@ -175,23 +192,32 @@ def test_cycle_job_with_performance_data(test_repository, failure_classification
    assert p.job is None


@pytest.mark.parametrize('repository_name, command_options, subcommand_options, should_expire',
                         [('autoland', '--days=365', None, True),
                          ('mozilla-inbound', '--days=365', None, True),
                          ('mozilla-beta', '--days=365', None, True),
                          ('mozilla-central', '--days=365', None, True),
                          ('autoland', '--days=401', None, False),
                          ])
def test_cycle_performance_data(test_repository, repository_name, push_stored,
                                test_perf_signature, command_options, subcommand_options,
                                should_expire):
@pytest.mark.parametrize(
    'repository_name, command_options, subcommand_options, should_expire',
    [
        ('autoland', '--days=365', None, True),
        ('mozilla-inbound', '--days=365', None, True),
        ('mozilla-beta', '--days=365', None, True),
        ('mozilla-central', '--days=365', None, True),
        ('autoland', '--days=401', None, False),
    ],
)
def test_cycle_performance_data(
    test_repository,
    repository_name,
    push_stored,
    test_perf_signature,
    command_options,
    subcommand_options,
    should_expire,
):
    test_repository.name = repository_name
    test_repository.save()

    expired_timestamp = datetime.datetime.now() - datetime.timedelta(days=400)

    test_perf_signature_2 = PerformanceSignature.objects.create(
        signature_hash='b'*40,
        signature_hash='b' * 40,
        repository=test_perf_signature.repository,
        framework=test_perf_signature.framework,
        platform=test_perf_signature.platform,

@ -199,7 +225,8 @@ def test_cycle_performance_data(test_repository, repository_name, push_stored,
        suite=test_perf_signature.suite,
        test='test 2',
        last_updated=expired_timestamp,
        has_subtests=False)
        has_subtests=False,
    )

    push1 = Push.objects.get(id=1)
    push1.time = datetime.datetime.now()

@ -217,7 +244,8 @@ def test_cycle_performance_data(test_repository, repository_name, push_stored,
        job=None,
        signature=test_perf_signature,
        push_timestamp=push1.time,
        value=1.0)
        value=1.0,
    )

    # the performance datum that which we're targetting
    PerformanceDatum.objects.create(

@ -227,15 +255,20 @@ def test_cycle_performance_data(test_repository, repository_name, push_stored,
        job=None,
        signature=test_perf_signature_2,
        push_timestamp=push2.time,
        value=1.0)
        value=1.0,
    )

    command = filter(lambda arg: arg is not None,
                     ['cycle_data', command_options, 'from:perfherder', subcommand_options])
    command = filter(
        lambda arg: arg is not None,
        ['cycle_data', command_options, 'from:perfherder', subcommand_options],
    )
    call_command(*list(command))  # test repository isn't a main one

    if should_expire:
        assert list(PerformanceDatum.objects.values_list('id', flat=True)) == [1]
        assert list(PerformanceSignature.objects.values_list('id', flat=True)) == [test_perf_signature.id]
        assert list(PerformanceSignature.objects.values_list('id', flat=True)) == [
            test_perf_signature.id
        ]
    else:
        assert PerformanceDatum.objects.count() == 2
        assert PerformanceSignature.objects.count() == 2

@ -249,12 +282,14 @@ def test_performance_cycler_quit_indicator():
    max_five_minutes = datetime.timedelta(minutes=5)

    with pytest.raises(MaxRuntimeExceeded):
        PerformanceDatumManager._maybe_quit(started_at=ten_minutes_ago,
                                            max_overall_runtime=max_one_second)
        PerformanceDatumManager._maybe_quit(
            started_at=ten_minutes_ago, max_overall_runtime=max_one_second
        )

    try:
        PerformanceDatumManager._maybe_quit(started_at=two_seconds_ago,
                                            max_overall_runtime=max_five_minutes)
        PerformanceDatumManager._maybe_quit(
            started_at=two_seconds_ago, max_overall_runtime=max_five_minutes
        )
    except MaxRuntimeExceeded:
        pytest.fail('Performance cycling shouldn\'t have quit')

@ -264,13 +299,9 @@ def test_performance_cycler_doesnt_delete_too_recent_data():
    dangerously_recent = 40

    with pytest.raises(ValueError):
        PerfherderCycler(days=dangerously_recent,
                         chunk_size=1000,
                         sleep_time=0)
        PerfherderCycler(days=dangerously_recent, chunk_size=1000, sleep_time=0)

    try:
        PerfherderCycler(days=down_to_last_year,
                         chunk_size=1000,
                         sleep_time=0)
        PerfherderCycler(days=down_to_last_year, chunk_size=1000, sleep_time=0)
    except ValueError:
        pytest.fail('Should be able to expire data older than one year')

@ -1,7 +1,6 @@
import pytest

from treeherder.model.error_summary import (get_crash_signature,
                                            get_error_search_term)
from treeherder.model.error_summary import get_crash_signature, get_error_search_term

PIPE_DELIMITED_LINE_TEST_CASES = (
    (

@ -10,7 +9,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            '| chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js '
            '| Check settings tab URL - Got http://mochi.test:8888/browser/browser/components/loop/test/mochitest/loop_fxa.sjs'
        ),
        'browser_fxa_login.js'
        'browser_fxa_login.js',
    ),
    (
        (

@ -18,7 +17,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            '| file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html '
            '| image comparison (==), max difference: 255, number of differing pixels: 251'
        ),
        'component-alpha-exit-1.html'
        'component-alpha-exit-1.html',
    ),
    (
        (

@ -26,7 +25,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            '| /tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html '
            '| undefined assertion name - Result logged after SimpleTest.finish()'
        ),
        'test_dataChannel_basicAudio.html'
        'test_dataChannel_basicAudio.html',
    ),
    (
        (

@ -34,7 +33,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            r"| mainthreadio "
            r"| File 'c:\users\cltbld~1.t-w' was accessed and we were not expecting it: {'Count': 6, 'Duration': 0.112512, 'RunCount': 6}"
        ),
        'mainthreadio'
        'mainthreadio',
    ),
    (
        (

@ -43,7 +42,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png "
            "| application crashed [@ jemalloc_crash]"
        ),
        'webgl-resize-test.html'
        'webgl-resize-test.html',
    ),
    (
        (

@ -52,7 +51,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png "
            "| application crashed [@ jemalloc_crash]"
        ),
        'webgl-resize-test.html'
        'webgl-resize-test.html',
    ),
    (
        (

@ -61,7 +60,7 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            "| /tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html "
            "| touch-action attribute test on the cell: assert_true: scroll received while shouldn't expected true got false"
        ),
        'pointerevent_touch-action-table-test_touch-manual.html'
        'pointerevent_touch-action-table-test_touch-manual.html',
    ),
    (
        (

@ -70,8 +69,8 @@ PIPE_DELIMITED_LINE_TEST_CASES = (
            "| /tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html "
            "| touch-action attribute test on the cell: assert_true: scroll received while shouldn't expected true got false"
        ),
        'pointerevent_touch-action-table-test_touch-manual.html'
    )
        'pointerevent_touch-action-table-test_touch-manual.html',
    ),
)


@ -90,7 +89,7 @@ LEAK_LINE_TEST_CASES = (
            '(BackstagePass, CallbackObject, DOMEventTargetHelper, '
            'EventListenerManager, EventTokenBucket, ...)'
        ),
        'BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ...'
        'BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ...',
    ),
    (
        (

@ -99,7 +98,7 @@ LEAK_LINE_TEST_CASES = (
            '(AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, '
            'BufferRecycleBin, CipherSuiteChangeObserver, ...)'
        ),
        'AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui'
        'AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui',
    ),
    (
        (

@ -107,7 +106,7 @@ LEAK_LINE_TEST_CASES = (
            '| LeakSanitizer | leak at '
            'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
        ),
        'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
        'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init',
    ),
)

@ -122,11 +121,11 @@ def test_get_leak_search_term(line, exp_search_term):
FULL_LINE_FALLBACK_TEST_CASES = (
    (
        'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device',
        'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device'
        'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device',
    ),
    (
        'PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)',
        'PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)'
        'PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)',
    ),
)

@ -153,7 +152,7 @@ LONG_LINE_TEST_CASES = (
        (
            'command timed out: 2400 seconds without output running '
            '[\'/tools/buildbot/bin/python\', \'scripts/scrip'
        )
        ),
    ),
    (
        (

@ -161,7 +160,7 @@ LONG_LINE_TEST_CASES = (
            '| test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us '
            '| AssertionError: 0 != 1'
        ),
        'test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted'
        'test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted',
    ),
)

@ -182,7 +181,7 @@ CRASH_LINE_TEST_CASES = (
            'jsreftest.html?test=test262/ch11/11.4/11.4.1/11.4.1-4.a-6.js | '
            'application crashed [@ nsInputStreamPump::OnStateStop()]'
        ),
        'nsInputStreamPump::OnStateStop()'
        'nsInputStreamPump::OnStateStop()',
    ),
)

@ -197,18 +196,15 @@ def test_get_crash_signature(line, exp_search_term):
BLACKLIST_TEST_CASES = (
    (
        'TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output',
        'TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output'
    ),
    (
        'Return code: 1',
        None
        'TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output',
    ),
    ('Return code: 1', None),
    (
        (
            'REFTEST PROCESS-CRASH | file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html '
            '| application crashed [@ mozalloc_abort]'
        ),
        'video-1.html'
        'video-1.html',
    ),
)

@ -8,8 +8,7 @@ def test_note_deletion(test_job_with_notes):

    # delete second failure classification, verify that we now have first one
    # (after reloading job)
    JobNote.objects.get(job=test_job_with_notes,
                        failure_classification_id=3).delete()
    JobNote.objects.get(job=test_job_with_notes, failure_classification_id=3).delete()
    test_job_with_notes.refresh_from_db()
    assert test_job_with_notes.failure_classification_id == 2

@ -7,25 +7,55 @@ SAME_SUITE = 'same suite'
SAME_TEST = 'same test'


@pytest.mark.parametrize("suite_public_name, suite_public_name_2,"
                         "test_public_name, test_public_name_2,"
                         "suite, suite_2, test, test_2", [
                             (SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
                              SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
                              SAME_SUITE, SAME_SUITE, 'test', 'test_2'),

                             (SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
                              SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
                              'suite', 'suite_2', SAME_TEST, SAME_TEST),

                             (SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
                              SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
                              'suite', 'suite_2', 'test', 'test_2'),
                         ])
def test_trigger_public_suite_name_constraint(test_perf_signature, test_perf_signature_2,
                                              suite_public_name, suite_public_name_2,
                                              test_public_name, test_public_name_2,
                                              suite, suite_2, test, test_2):
@pytest.mark.parametrize(
    "suite_public_name, suite_public_name_2,"
    "test_public_name, test_public_name_2,"
    "suite, suite_2, test, test_2",
    [
        (
            SAME_SUITE_PUBLIC_NAME,
            SAME_SUITE_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            SAME_SUITE,
            SAME_SUITE,
            'test',
            'test_2',
        ),
        (
            SAME_SUITE_PUBLIC_NAME,
            SAME_SUITE_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            'suite',
            'suite_2',
            SAME_TEST,
            SAME_TEST,
        ),
        (
            SAME_SUITE_PUBLIC_NAME,
            SAME_SUITE_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            'suite',
            'suite_2',
            'test',
            'test_2',
        ),
    ],
)
def test_trigger_public_suite_name_constraint(
    test_perf_signature,
    test_perf_signature_2,
    suite_public_name,
    suite_public_name_2,
    test_public_name,
    test_public_name_2,
    suite,
    suite_2,
    test,
    test_2,
):
    test_perf_signature.suite_public_name = suite_public_name
    test_perf_signature.test_public_name = test_public_name
    test_perf_signature.suite = suite

@ -42,35 +72,78 @@ def test_trigger_public_suite_name_constraint(test_perf_signature, test_perf_sig
    test_perf_signature_2.save()


@pytest.mark.parametrize("suite_public_name, suite_public_name_2,"
                         "test_public_name, test_public_name_2,"
                         "suite, suite_2, test, test_2", [
                             (None, None, None, None, 'suite', 'suite_2', 'test', 'test_2'),
                             ('suite_public_name', 'suite_public_name_2', None, None,
                              'suite', 'suite_2', 'test', 'test_2'),
                             (None, None, 'test', 'test_2', 'suite', 'suite_2', 'test', 'test_2'),
                             ('suite_public_name', None, 'test', None, 'suite', 'suite_2', 'test', 'test_2'),

                             ('suite_public_name', 'suite_public_name_2',
                              SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
                              'suite', 'suite_2', 'test', 'test_2'),

                             (SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
                              'test_public_name', 'test_public_name_2',
                              'suite', 'suite_2', 'test', 'test_2'),

                             ('suite_public_name', 'suite_public_name_2',
                              SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
                              SAME_SUITE, SAME_SUITE, SAME_TEST, SAME_TEST),

                             ('suite_public_name', 'suite_public_name_2',
                              'test_public_name', 'test_public_name_2',
                              'suite', 'suite_2', 'test', 'test_2'),
                         ])
def test_do_not_trigger_public_suite_name_constraint(test_perf_signature, test_perf_signature_2,
                                                     suite_public_name, suite_public_name_2,
                                                     test_public_name, test_public_name_2,
                                                     suite, suite_2, test, test_2):
@pytest.mark.parametrize(
    "suite_public_name, suite_public_name_2,"
    "test_public_name, test_public_name_2,"
    "suite, suite_2, test, test_2",
    [
        (None, None, None, None, 'suite', 'suite_2', 'test', 'test_2'),
        (
            'suite_public_name',
            'suite_public_name_2',
            None,
            None,
            'suite',
            'suite_2',
            'test',
            'test_2',
        ),
        (None, None, 'test', 'test_2', 'suite', 'suite_2', 'test', 'test_2'),
        ('suite_public_name', None, 'test', None, 'suite', 'suite_2', 'test', 'test_2'),
        (
            'suite_public_name',
            'suite_public_name_2',
            SAME_TEST_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            'suite',
            'suite_2',
            'test',
            'test_2',
        ),
        (
            SAME_SUITE_PUBLIC_NAME,
            SAME_SUITE_PUBLIC_NAME,
            'test_public_name',
            'test_public_name_2',
            'suite',
            'suite_2',
            'test',
            'test_2',
        ),
        (
            'suite_public_name',
            'suite_public_name_2',
            SAME_TEST_PUBLIC_NAME,
            SAME_TEST_PUBLIC_NAME,
            SAME_SUITE,
            SAME_SUITE,
            SAME_TEST,
            SAME_TEST,
        ),
        (
            'suite_public_name',
            'suite_public_name_2',
            'test_public_name',
            'test_public_name_2',
            'suite',
            'suite_2',
            'test',
            'test_2',
        ),
    ],
)
def test_do_not_trigger_public_suite_name_constraint(
    test_perf_signature,
    test_perf_signature_2,
    suite_public_name,
    suite_public_name_2,
    test_public_name,
    test_public_name_2,
    suite,
    suite_2,
    test,
    test_2,
):
    test_perf_signature.suite_public_name = suite_public_name
    test_perf_signature.test_public_name = test_public_name
    test_perf_signature.suite = suite

@ -3,13 +3,12 @@ import datetime
import pytest
from django.core.exceptions import ValidationError

from treeherder.perf.models import (PerformanceAlert,
                                    PerformanceAlertSummary,
                                    PerformanceSignature)
from treeherder.perf.models import PerformanceAlert, PerformanceAlertSummary, PerformanceSignature


def test_summary_modification(test_repository, test_perf_signature,
                              test_perf_alert_summary, test_perf_alert):
def test_summary_modification(
    test_repository, test_perf_signature, test_perf_alert_summary, test_perf_alert
):
    (s, a) = (test_perf_alert_summary, test_perf_alert)

    assert s.bug_number is None

@ -29,19 +28,20 @@ def test_summary_modification(test_repository, test_perf_signature,
    assert s.status == PerformanceAlertSummary.UNTRIAGED


def test_summary_status(test_repository, test_perf_signature,
                        test_perf_alert_summary, test_perf_framework):
def test_summary_status(
    test_repository, test_perf_signature, test_perf_alert_summary, test_perf_framework
):
    signature1 = test_perf_signature
    signature2 = PerformanceSignature.objects.create(
        repository=test_repository,
        signature_hash=(40*'u'),
        signature_hash=(40 * 'u'),
        framework=test_perf_signature.framework,
        platform=test_perf_signature.platform,
        option_collection=test_perf_signature.option_collection,
        suite='mysuite_2',
        test='mytest_2',
        has_subtests=False,
        last_updated=datetime.datetime.now()
        last_updated=datetime.datetime.now(),
    )
    s = test_perf_alert_summary

@ -53,7 +53,8 @@ def test_summary_status(test_repository, test_perf_signature,
        amount_abs=50.0,
        prev_value=100.0,
        new_value=150.0,
        t_value=20.0)
        t_value=20.0,
    )

    # this is the test case
    # ignore downstream and reassigned to update the summary status

@ -68,15 +69,17 @@ def test_summary_status(test_repository, test_perf_signature,
        amount_abs=50.0,
        prev_value=100.0,
        new_value=150.0,
        t_value=20.0)
        t_value=20.0,
    )
    b.status = PerformanceAlert.ACKNOWLEDGED
    b.save()
    s = PerformanceAlertSummary.objects.get(id=1)
    assert s.status == PerformanceAlertSummary.IMPROVEMENT


def test_alert_modification(test_perf_signature, test_perf_alert_summary,
                            push_stored, test_perf_alert):
def test_alert_modification(
    test_perf_signature, test_perf_alert_summary, push_stored, test_perf_alert
):
    p = test_perf_alert
    s2 = PerformanceAlertSummary.objects.create(
        id=2,

@ -85,7 +88,8 @@ def test_alert_modification(test_perf_signature, test_perf_alert_summary,
        push_id=4,
        created=datetime.datetime.now(),
        framework=test_perf_alert_summary.framework,
        manually_created=False)
        manually_created=False,
    )

    assert p.related_summary is None
    assert p.status == PerformanceAlert.UNTRIAGED

@ -21,7 +21,7 @@ def test_many_various_alerts():
        'osx-10-10-shippable',
        'osx-10-10-shippable',
        'android-hw-pix-7-1-android-aarch64',
        'android-hw-pix-7-1-android-aarch64'
        'android-hw-pix-7-1-android-aarch64',
    )

    reversed_magnitudes = list(reversed(range(len(alerts))))

@ -37,10 +37,7 @@
@pytest.fixture
def test_few_various_alerts():
    alerts = [Mock(spec=PerformanceAlert) for i in range(2)]
    platforms = (
        'windows7-32-shippable',
        'linux64-shippable-qr'
    )
    platforms = ('windows7-32-shippable', 'linux64-shippable-qr')
    reversed_magnitudes = list(reversed(range(len(alerts))))
    toggle = True
    for idx, alert in enumerate(alerts):

@ -59,7 +56,7 @@ def test_few_regressions():
        'windows7-32-shippable',
        'linux64-shippable-qr',
        'osx-10-10-shippable',
        'android-hw-pix-7-1-android-aarch64'
        'android-hw-pix-7-1-android-aarch64',
    )
    reversed_magnitudes = list(reversed(range(len(alerts))))
    for idx, alert in enumerate(alerts):

@ -96,45 +93,42 @@ def test_init():
        AlertsPicker(
            max_alerts=0,
            max_improvements=2,
            platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
            platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
        )

    with pytest.raises(ValueError):
        AlertsPicker(
            max_alerts=3,
            max_improvements=0,
            platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
            platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
        )

    with pytest.raises(ValueError):
        AlertsPicker(
            max_alerts=3,
            max_improvements=5,
            platforms_of_interest=tuple()
        )
        AlertsPicker(max_alerts=3, max_improvements=5, platforms_of_interest=tuple())

    with pytest.raises(ValueError):
        AlertsPicker(
            max_alerts=0,
            max_improvements=0,
            platforms_of_interest=tuple()
        )
        AlertsPicker(max_alerts=0, max_improvements=0, platforms_of_interest=tuple())


def test_extract_important_alerts(test_bad_platform_names, test_few_improvements, test_few_regressions):
def test_extract_important_alerts(
    test_bad_platform_names, test_few_improvements, test_few_regressions
):
    def count_alert_types(alerts):
        return Counter([alert.is_regression for alert in alerts])

    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    expected_platforms_order = ('windows10-64-shippable',
                                'windows7-32-shippable',
                                'linux64-shippable-qr',
                                'osx-10-10-shippable',
                                'windows10-64-shippable')
    expected_platforms_order = (
        'windows10-64-shippable',
        'windows7-32-shippable',
        'linux64-shippable-qr',
        'osx-10-10-shippable',
        'windows10-64-shippable',
    )
    expected_magnitudes_order = (4, 3, 2, 1, 4)

    with pytest.raises(ValueError):

@ -152,14 +146,16 @@ def test_extract_important_alerts(test_bad_platform_names, test_few_improvements
        assert alert.amount_pct == expected_magnitudes_order[idx]


def test_ensure_alerts_variety(test_few_regressions, test_few_improvements, test_many_various_alerts, test_few_various_alerts):
def test_ensure_alerts_variety(
    test_few_regressions, test_few_improvements, test_many_various_alerts, test_few_various_alerts
):
    def count_alert_types(alerts):
        return Counter([alert.is_regression for alert in alerts])

    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    selected_alerts = picker._ensure_alerts_variety(test_few_regressions)

@ -187,7 +183,7 @@ def test_ensure_alerts_variety(test_few_regressions, test_few_improvements, test
    picker = AlertsPicker(
        max_alerts=1,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    selected_alerts = picker._ensure_alerts_variety(test_few_various_alerts)

@ -197,29 +193,30 @@ def test_ensure_alerts_variety(test_few_regressions, test_few_improvements, test
    assert number_of[False] == 0


@pytest.mark.parametrize(('max_alerts, expected_alerts_platforms'), [
    (5, ('windows10', 'windows7', 'linux', 'osx', 'android')),
    (8, ('windows10', 'windows7', 'linux', 'osx', 'android',
         'windows10', 'windows7', 'linux'))
])
@pytest.mark.parametrize(
    ('max_alerts, expected_alerts_platforms'),
    [
        (5, ('windows10', 'windows7', 'linux', 'osx', 'android')),
        (8, ('windows10', 'windows7', 'linux', 'osx', 'android', 'windows10', 'windows7', 'linux')),
    ],
)
def test_ensure_platform_variety(test_many_various_alerts, max_alerts, expected_alerts_platforms):
    picker = AlertsPicker(
        max_alerts=max_alerts,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    picked_alerts = picker._ensure_platform_variety(test_many_various_alerts)
    for idx, platform in enumerate(expected_alerts_platforms):
        assert (picked_alerts[idx].series_signature.platform.platform
                .startswith(platform))
        assert picked_alerts[idx].series_signature.platform.platform.startswith(platform)


def test_os_relevance():
    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )
    assert 5 == picker._os_relevance('windows10')
    assert 4 == picker._os_relevance('windows7')

@ -235,7 +232,7 @@ def test_has_relevant_platform(test_many_various_alerts, test_bad_platform_names
    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    for alert in test_many_various_alerts:

@ -248,7 +245,7 @@ def test_extract_by_relevant_platforms(test_many_various_alerts, test_bad_platfo
    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )
    all_alerts = test_many_various_alerts + test_bad_platform_names

@ -260,22 +257,25 @@ def test_extract_by_relevant_platforms(test_many_various_alerts, test_bad_platfo
def test_multi_criterion_sort(test_many_various_alerts):
    def count_alert_types(alerts):
        return Counter([alert.is_regression for alert in alerts])

    picker = AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android')
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )

    expected_platforms_order = ('windows10-64-shippable',
                                'windows7-32-shippable',
                                'linux64-shippable-qr',
                                'osx-10-10-shippable',
                                'android-hw-pix-7-1-android-aarch64',
                                'windows10-64-shippable',
                                'windows7-32-shippable',
                                'linux64-shippable-qr',
                                'osx-10-10-shippable',
                                'android-hw-pix-7-1-android-aarch64')
    expected_platforms_order = (
        'windows10-64-shippable',
        'windows7-32-shippable',
        'linux64-shippable-qr',
        'osx-10-10-shippable',
        'android-hw-pix-7-1-android-aarch64',
        'windows10-64-shippable',
        'windows7-32-shippable',
        'linux64-shippable-qr',
        'osx-10-10-shippable',
        'android-hw-pix-7-1-android-aarch64',
    )
    expected_magnitudes_order = (9, 7, 5, 3, 1, 8, 6, 4, 2, 0)

    ordered_alerts = picker._multi_criterion_sort(reversed(test_many_various_alerts))

@ -6,15 +6,14 @@ from unittest.mock import Mock

import pytest

from treeherder.model.models import (MachinePlatform,
                                     Option,
                                     OptionCollection)
from treeherder.perf.alerts import (AlertsPicker,
                                    BackfillReportMaintainer)
from treeherder.perf.models import (BackfillRecord,
                                    BackfillReport,
                                    PerformanceAlert,
                                    PerformanceSignature)
from treeherder.model.models import MachinePlatform, Option, OptionCollection
from treeherder.perf.alerts import AlertsPicker, BackfillReportMaintainer
from treeherder.perf.models import (
    BackfillRecord,
    BackfillReport,
    PerformanceAlert,
    PerformanceSignature,
)

LETTERS = string.ascii_lowercase
EPOCH = datetime.datetime.utcfromtimestamp(0)

@ -24,9 +23,11 @@ RANDOM_STRINGS = set()
@pytest.fixture(scope='module')
def alerts_picker():
    # real-world instance
    return AlertsPicker(max_alerts=5,
                        max_improvements=2,
                        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'))
    return AlertsPicker(
        max_alerts=5,
        max_improvements=2,
        platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
    )


@pytest.fixture

@ -38,25 +39,19 @@ def mock_backfill_context_fetcher(backfill_record_context):
@pytest.fixture
def option_collection():
    option = Option.objects.create(name='opt')
    return OptionCollection.objects.create(
        option_collection_hash='my_option_hash',
        option=option)
    return OptionCollection.objects.create(option_collection_hash='my_option_hash', option=option)


@pytest.fixture
def relevant_platform():
    return MachinePlatform.objects.create(
        os_name='win',
        platform='windows10',
        architecture='x86')
    return MachinePlatform.objects.create(os_name='win', platform='windows10', architecture='x86')


@pytest.fixture
def irrelevant_platform():
    return MachinePlatform.objects.create(
        os_name='OS_OF_NO_INTEREST',
        platform='PLATFORM_OF_NO_INTEREST',
        architecture='x86')
        os_name='OS_OF_NO_INTEREST', platform='PLATFORM_OF_NO_INTEREST', architecture='x86'
    )


@pytest.fixture

@ -74,8 +69,14 @@ def unique_random_string():


@pytest.fixture
def create_perf_signature(test_repository, test_perf_framework, option_collection, relevant_platform,
                          irrelevant_platform, unique_random_string):
def create_perf_signature(
    test_repository,
    test_perf_framework,
    option_collection,
    relevant_platform,
    irrelevant_platform,
    unique_random_string,
):
    def _create_perf_signature(relevant=True):
        platform = relevant_platform if relevant else irrelevant_platform

@ -88,7 +89,7 @@ def create_perf_signature(test_repository, test_perf_framework, option_collectio
            suite=unique_random_string(),
            test=unique_random_string(),
            has_subtests=False,
            last_updated=datetime.datetime.now()
            last_updated=datetime.datetime.now(),
        )
        return signature

@ -108,93 +109,95 @@ def create_alerts(create_perf_signature):
            amount_abs=50.0,
            prev_value=100.0,
            new_value=150.0,
            t_value=20.0)
            t_value=20.0,
        )
        alerts.append(alert)
        return alerts

    return _create_alerts


def test_reports_are_generated_for_relevant_alerts_only(test_perf_alert_summary,
                                                        test_perf_framework,
                                                        test_repository,
                                                        create_alerts,
                                                        alerts_picker,
                                                        mock_backfill_context_fetcher):
    create_alerts(test_perf_alert_summary,  # irrelevant alert
                  relevant=False,
                  amount=1)
def test_reports_are_generated_for_relevant_alerts_only(
    test_perf_alert_summary,
    test_perf_framework,
    test_repository,
    create_alerts,
    alerts_picker,
    mock_backfill_context_fetcher,
):
    create_alerts(test_perf_alert_summary, relevant=False, amount=1)  # irrelevant alert

    report_maintainer = BackfillReportMaintainer(alerts_picker,
                                                 mock_backfill_context_fetcher)
    report_maintainer = BackfillReportMaintainer(alerts_picker, mock_backfill_context_fetcher)

    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    report_maintainer.provide_updated_reports(
        since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
    )

    assert not BackfillReport.objects.exists()


def test_running_report_twice_on_unchanged_data_doesnt_change_anything(test_perf_alert_summary,
                                                                       test_perf_framework,
                                                                       test_repository,
                                                                       create_alerts,
                                                                       alerts_picker,
                                                                       mock_backfill_context_fetcher):
def test_running_report_twice_on_unchanged_data_doesnt_change_anything(
    test_perf_alert_summary,
    test_perf_framework,
    test_repository,
    create_alerts,
    alerts_picker,
    mock_backfill_context_fetcher,
):
    create_alerts(test_perf_alert_summary, amount=3)  # relevant alerts
    create_alerts(test_perf_alert_summary,  # irrelevant alert
                  relevant=False,
                  amount=1)
    create_alerts(test_perf_alert_summary, relevant=False, amount=1)  # irrelevant alert

    assert not BackfillReport.objects.exists()

    report_maintainer = BackfillReportMaintainer(alerts_picker,
                                                 mock_backfill_context_fetcher)
    report_maintainer = BackfillReportMaintainer(alerts_picker, mock_backfill_context_fetcher)

    # run report once
    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    initial_records_timestamps, initial_report_timestamps = __fetch_report_timestamps(test_perf_alert_summary)
    report_maintainer.provide_updated_reports(
        since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
    )
    initial_records_timestamps, initial_report_timestamps = __fetch_report_timestamps(
        test_perf_alert_summary
    )

    # run report twice (no changes happened on underlying data)
    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    report_maintainer.provide_updated_reports(
        since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
    )
    records_timestamps, report_timestamps = __fetch_report_timestamps(test_perf_alert_summary)

    assert initial_report_timestamps == report_timestamps
    assert initial_records_timestamps == records_timestamps


def test_reports_are_updated_after_alert_summaries_change(test_perf_alert_summary,
                                                          test_perf_framework,
                                                          test_repository,
                                                          create_alerts,
                                                          alerts_picker,
                                                          mock_backfill_context_fetcher):
    relevant_alerts = create_alerts(test_perf_alert_summary, amount=3)  # relevant alerts, all regressions
    create_alerts(test_perf_alert_summary,  # irrelevant alert
                  relevant=False,
                  amount=1)
def test_reports_are_updated_after_alert_summaries_change(
    test_perf_alert_summary,
    test_perf_framework,
    test_repository,
    create_alerts,
    alerts_picker,
    mock_backfill_context_fetcher,
):
    relevant_alerts = create_alerts(
        test_perf_alert_summary, amount=3
    )  # relevant alerts, all regressions
    create_alerts(test_perf_alert_summary, relevant=False, amount=1)  # irrelevant alert

    assert not BackfillReport.objects.exists()

    report_maintainer = BackfillReportMaintainer(alerts_picker,
                                                 mock_backfill_context_fetcher)
    report_maintainer = BackfillReportMaintainer(alerts_picker, mock_backfill_context_fetcher)

    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    report_maintainer.provide_updated_reports(
        since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
    )

    assert BackfillReport.objects.count() == 1
    assert BackfillRecord.objects.count() == 3

    # new alerts will cause report updates
    create_alerts(test_perf_alert_summary, amount=3)  # relevant alerts
    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    report_maintainer.provide_updated_reports(
        since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
    )

    assert BackfillRecord.objects.count() == 5

@ -202,10 +205,12 @@ def test_reports_are_updated_after_alert_summaries_change(test_perf_alert_summar
    alert = relevant_alerts[0]
    alert.status = PerformanceAlert.ACKNOWLEDGED
    alert.save()
    initial_report_timestamps, initial_records_timestamps = __fetch_report_timestamps(test_perf_alert_summary)
    report_maintainer.provide_updated_reports(since=EPOCH,
                                              frameworks=[test_perf_framework.name],
                                              repositories=[test_repository.name])
    initial_report_timestamps, initial_records_timestamps = __fetch_report_timestamps(
        test_perf_alert_summary
    )
report_maintainer.provide_updated_reports(
|
||||
since=EPOCH, frameworks=[test_perf_framework.name], repositories=[test_repository.name]
|
||||
)
|
||||
|
||||
report_timestamps, records_timestmaps = __fetch_report_timestamps(test_perf_alert_summary)
|
||||
assert initial_report_timestamps != report_timestamps
|
||||
|
|
|
@@ -5,17 +5,26 @@ import pytest

from treeherder.model.models import Push
from treeherder.perf.alerts import generate_new_alerts_in_series
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceDatum,
PerformanceSignature)
from treeherder.perf.models import (
PerformanceAlert,
PerformanceAlertSummary,
PerformanceDatum,
PerformanceSignature,
)


def _verify_alert(alertid, expected_push_id, expected_prev_push_id,
expected_signature, expected_prev_value,
expected_new_value, expected_is_regression,
expected_status, expected_summary_status,
expected_classifier):
def _verify_alert(
alertid,
expected_push_id,
expected_prev_push_id,
expected_signature,
expected_prev_value,
expected_new_value,
expected_is_regression,
expected_status,
expected_summary_status,
expected_classifier,
):
alert = PerformanceAlert.objects.get(id=alertid)
assert alert.prev_value == expected_prev_value
assert alert.new_value == expected_new_value

@@ -31,81 +40,139 @@ def _verify_alert(alertid, expected_push_id, expected_prev_push_id,
assert summary.status == expected_summary_status


def _generate_performance_data(test_repository, test_perf_signature,
test_issue_tracker, generic_reference_data,
base_timestamp, start_id, value, amount):
for (t, v) in zip([i for i in range(start_id, start_id + amount)],
[value for i in range(start_id, start_id + amount)]):
def _generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_timestamp,
start_id,
value,
amount,
):
for (t, v) in zip(
[i for i in range(start_id, start_id + amount)],
[value for i in range(start_id, start_id + amount)],
):
push, _ = Push.objects.get_or_create(
repository=test_repository,
revision='1234abcd%s' % t,
defaults={
'author': 'foo@bar.com',
'time': datetime.datetime.fromtimestamp(base_timestamp + t)
})
'time': datetime.datetime.fromtimestamp(base_timestamp + t),
},
)
PerformanceDatum.objects.create(
repository=test_repository,
result_set_id=t,
push=push,
signature=test_perf_signature,
push_timestamp=datetime.datetime.utcfromtimestamp(
base_timestamp + t),
value=v)
push_timestamp=datetime.datetime.utcfromtimestamp(base_timestamp + t),
value=v,
)


def test_detect_alerts_in_series(test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature):
def test_detect_alerts_in_series(
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature,
):

base_time = time.time() # generate it based off current time
INTERVAL = 30
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 1, 0.5, int(INTERVAL / 2))
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, int(INTERVAL / 2) + 1, 1.0, int(INTERVAL / 2))
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
1,
0.5,
int(INTERVAL / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
int(INTERVAL / 2) + 1,
1.0,
int(INTERVAL / 2),
)

generate_new_alerts_in_series(test_perf_signature)

assert PerformanceAlert.objects.count() == 1
assert PerformanceAlertSummary.objects.count() == 1
_verify_alert(1, (INTERVAL/2)+1, (INTERVAL/2), test_perf_signature, 0.5,
1.0, True, PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED, None)
_verify_alert(
1,
(INTERVAL / 2) + 1,
(INTERVAL / 2),
test_perf_signature,
0.5,
1.0,
True,
PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED,
None,
)

# verify that no new alerts generated if we rerun
generate_new_alerts_in_series(test_perf_signature)
assert PerformanceAlert.objects.count() == 1
assert PerformanceAlertSummary.objects.count() == 1
_verify_alert(1, (INTERVAL/2)+1, (INTERVAL/2), test_perf_signature, 0.5,
1.0, True, PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED, None)
_verify_alert(
1,
(INTERVAL / 2) + 1,
(INTERVAL / 2),
test_perf_signature,
0.5,
1.0,
True,
PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED,
None,
)

# add data that should be enough to generate a new alert if we rerun
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, (INTERVAL+1), 2.0, INTERVAL)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
(INTERVAL + 1),
2.0,
INTERVAL,
)
generate_new_alerts_in_series(test_perf_signature)

assert PerformanceAlert.objects.count() == 2
assert PerformanceAlertSummary.objects.count() == 2
_verify_alert(2, INTERVAL+1, INTERVAL, test_perf_signature, 1.0, 2.0,
True, PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED, None)
_verify_alert(
2,
INTERVAL + 1,
INTERVAL,
test_perf_signature,
1.0,
2.0,
True,
PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED,
None,
)


def test_detect_alerts_in_series_with_retriggers(
test_repository, test_issue_tracker,
failure_classifications, generic_reference_data, test_perf_signature):
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature,
):

# sometimes we detect an alert in the middle of a series
# where there are retriggers, make sure we handle this case

@@ -116,45 +183,83 @@ def test_detect_alerts_in_series_with_retriggers(
# mix)
base_time = time.time() # generate it based off current time
for i in range(20):
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 1, 0.5, 1)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
1,
0.5,
1,
)
for i in range(5):
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 2, 0.5, 1)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
2,
0.5,
1,
)
for i in range(15):
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 2, 1.0, 1)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
2,
1.0,
1,
)

generate_new_alerts_in_series(test_perf_signature)
_verify_alert(1, 2, 1, test_perf_signature, 0.5, 0.875, True,
PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED, None)
_verify_alert(
1,
2,
1,
test_perf_signature,
0.5,
0.875,
True,
PerformanceAlert.UNTRIAGED,
PerformanceAlertSummary.UNTRIAGED,
None,
)


def test_no_alerts_with_old_data(
test_repository, test_issue_tracker,
failure_classifications, generic_reference_data, test_perf_signature):
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature,
):
base_time = 0 # 1970, too old!
INTERVAL = 30
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 1, 0.5, int(INTERVAL / 2))
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, int(INTERVAL / 2) + 1, 1.0, int(INTERVAL / 2))
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
1,
0.5,
int(INTERVAL / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
int(INTERVAL / 2) + 1,
1.0,
int(INTERVAL / 2),
)

generate_new_alerts_in_series(test_perf_signature)

@@ -163,8 +268,12 @@ def test_no_alerts_with_old_data(


def test_custom_alert_threshold(
test_repository, test_issue_tracker,
failure_classifications, generic_reference_data, test_perf_signature):
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature,
):

test_perf_signature.alert_threshold = 200.0
test_perf_signature.save()

@@ -174,21 +283,36 @@ def test_custom_alert_threshold(
# of 200% that should only generate 1
INTERVAL = 60
base_time = time.time()
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 1, 0.5, int(INTERVAL / 3))
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, int(INTERVAL / 3) + 1, 0.6, int(INTERVAL / 3))
_generate_performance_data(test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time, 2 * int(INTERVAL / 3) + 1, 2.0, int(INTERVAL / 3))
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
1,
0.5,
int(INTERVAL / 3),
)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
int(INTERVAL / 3) + 1,
0.6,
int(INTERVAL / 3),
)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
2 * int(INTERVAL / 3) + 1,
2.0,
int(INTERVAL / 3),
)

generate_new_alerts_in_series(test_perf_signature)


@@ -196,14 +320,16 @@ def test_custom_alert_threshold(
assert PerformanceAlertSummary.objects.count() == 1


@pytest.mark.parametrize(('new_value', 'expected_num_alerts'),
[(1.0, 1), (0.25, 0)])
def test_alert_change_type_absolute(test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature, new_value,
expected_num_alerts):
@pytest.mark.parametrize(('new_value', 'expected_num_alerts'), [(1.0, 1), (0.25, 0)])
def test_alert_change_type_absolute(
test_repository,
test_issue_tracker,
failure_classifications,
generic_reference_data,
test_perf_signature,
new_value,
expected_num_alerts,
):
# modify the test signature to say that we alert on absolute value
# (as opposed to percentage change)
test_perf_signature.alert_change_type = PerformanceSignature.ALERT_ABS

@@ -212,13 +338,26 @@ def test_alert_change_type_absolute(test_repository,

base_time = time.time() # generate it based off current time
INTERVAL = 30
_generate_performance_data(test_repository, test_perf_signature,
test_issue_tracker, generic_reference_data,
base_time, 1, 0.5, int(INTERVAL / 2))
_generate_performance_data(test_repository, test_perf_signature,
test_issue_tracker, generic_reference_data,
base_time, int(INTERVAL / 2) + 1, new_value,
int(INTERVAL / 2))
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
1,
0.5,
int(INTERVAL / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
test_issue_tracker,
generic_reference_data,
base_time,
int(INTERVAL / 2) + 1,
new_value,
int(INTERVAL / 2),
)

generate_new_alerts_in_series(test_perf_signature)

@@ -32,7 +32,8 @@ def gapped_performance_data(test_perf_signature, eleven_jobs_stored, test_perf_a
push_ids_to_keep=[1, 2, 4, 7, 9],
highlighted_push_id=4,
perf_alert=test_perf_alert,
perf_signature=test_perf_signature)
perf_signature=test_perf_signature,
)


@pytest.fixture

@@ -56,11 +57,14 @@ def single_performance_datum(test_perf_signature, eleven_jobs_stored, test_perf_
push_ids_to_keep=[4],
highlighted_push_id=4,
perf_alert=test_perf_alert,
perf_signature=test_perf_signature)
perf_signature=test_perf_signature,
)


@pytest.fixture
def retriggerable_and_nonretriggerable_performance_data(test_perf_signature, eleven_jobs_stored, test_perf_alert):
def retriggerable_and_nonretriggerable_performance_data(
test_perf_signature, eleven_jobs_stored, test_perf_alert
):
"""
Graph view looks like:


@@ -78,10 +82,14 @@ def retriggerable_and_nonretriggerable_performance_data(test_perf_signature, ele
out_of_retrigger_range = datetime.datetime(year=2014, month=1, day=1)

prepare_graph_data_scenario(
push_ids_to_keep=[4, NON_RETRIGGERABLE_JOB_ID], # generally, fixture job ids == parent push id
push_ids_to_keep=[
4,
NON_RETRIGGERABLE_JOB_ID,
], # generally, fixture job ids == parent push id
highlighted_push_id=4,
perf_alert=test_perf_alert,
perf_signature=test_perf_signature)
perf_signature=test_perf_signature,
)

# make 2nd data point recent enough so it
# won't get selected for retriggering

@@ -117,7 +125,7 @@ def prepare_graph_data_scenario(push_ids_to_keep, highlighted_push_id, perf_aler
job=job,
push=job.push,
repository=job.repository,
signature=perf_signature
signature=perf_signature,
)
perf_datum.push.time = job.push.time
perf_datum.push.save()

@@ -166,7 +174,9 @@ def test_identify_retriggerables_as_unit():

# Component tests
def test_identify_retriggerables_selects_all_data_points(gapped_performance_data, test_perf_alert):
identify_retriggerables = IdentifyAlertRetriggerables(max_data_points=5, time_interval=ONE_DAY_INTERVAL)
identify_retriggerables = IdentifyAlertRetriggerables(
max_data_points=5, time_interval=ONE_DAY_INTERVAL
)
data_points_to_retrigger = identify_retriggerables(test_perf_alert)

assert len(data_points_to_retrigger) == 5

@@ -181,8 +191,12 @@ def test_identify_retriggerables_selects_all_data_points(gapped_performance_data
assert max_push_timestamp <= datetime.datetime(year=2013, month=11, day=14)


def test_identify_retriggerables_selects_even_single_data_point(single_performance_datum, test_perf_alert):
identify_retriggerables = IdentifyAlertRetriggerables(max_data_points=5, time_interval=ONE_DAY_INTERVAL)
def test_identify_retriggerables_selects_even_single_data_point(
single_performance_datum, test_perf_alert
):
identify_retriggerables = IdentifyAlertRetriggerables(
max_data_points=5, time_interval=ONE_DAY_INTERVAL
)
data_points_to_retrigger = identify_retriggerables(test_perf_alert)

assert len(data_points_to_retrigger) == 1

@@ -190,8 +204,11 @@ def test_identify_retriggerables_selects_even_single_data_point(single_performan


def test_identify_retriggerables_doesnt_select_out_of_range_data_points(
retriggerable_and_nonretriggerable_performance_data, test_perf_alert):
identify_retriggerables = IdentifyAlertRetriggerables(max_data_points=5, time_interval=ONE_DAY_INTERVAL)
retriggerable_and_nonretriggerable_performance_data, test_perf_alert
):
identify_retriggerables = IdentifyAlertRetriggerables(
max_data_points=5, time_interval=ONE_DAY_INTERVAL
)
data_points_to_retrigger = identify_retriggerables(test_perf_alert)

job_ids_to_retrigger = set(map(get_key("job_id"), data_points_to_retrigger))

@@ -1,39 +1,32 @@
from datetime import (datetime,
timedelta)
from datetime import datetime, timedelta

import pytest
import simplejson as json
from mock import (Mock,
patch)
from mock import Mock, patch

from treeherder.perf.models import (BackfillRecord,
BackfillReport,
PerformanceSettings)
from treeherder.perf.secretary_tool import (SecretaryTool,
default_serializer)
from treeherder.perf.models import BackfillRecord, BackfillReport, PerformanceSettings
from treeherder.perf.secretary_tool import SecretaryTool, default_serializer


@pytest.fixture
def performance_settings(db):
settings = {
"limits": 500,
"last_reset_date": datetime.utcnow(),
}
"limits": 500,
"last_reset_date": datetime.utcnow(),
}
return PerformanceSettings.objects.create(
name="perf_sheriff_bot",
settings=json.dumps(settings, default=default_serializer),
name="perf_sheriff_bot", settings=json.dumps(settings, default=default_serializer),
)


@pytest.fixture
def expired_performance_settings(db):
settings = {
"limits": 500,
"last_reset_date": datetime.utcnow() - timedelta(days=30),
}
"limits": 500,
"last_reset_date": datetime.utcnow() - timedelta(days=30),
}
return PerformanceSettings.objects.create(
name="perf_sheriff_bot",
settings=json.dumps(settings, default=default_serializer),
name="perf_sheriff_bot", settings=json.dumps(settings, default=default_serializer),
)


@@ -46,7 +39,9 @@ def create_record():
return _create_record


def test_secretary_tool_updates_only_matured_reports(test_perf_alert, test_perf_alert_2, create_record):
def test_secretary_tool_updates_only_matured_reports(
test_perf_alert, test_perf_alert_2, create_record
):
# create new report with records
create_record(test_perf_alert)
# create mature report with records

@@ -3,33 +3,37 @@ import os
import pytest

from tests.sampledata import SampleData
from treeherder.perfalert.perfalert import (RevisionDatum,
analyze,
calc_t,
default_weights,
detect_changes,
linear_weights)
from treeherder.perfalert.perfalert import (
RevisionDatum,
analyze,
calc_t,
default_weights,
detect_changes,
linear_weights,
)


@pytest.mark.parametrize(("revision_data", "weight_fn",
"expected"), [
# common-cases (one revision, one value)
([], default_weights, {"avg": 0.0, "n": 0, "variance": 0.0}),
([[3.0]], default_weights, {"avg": 3.0, "n": 1, "variance": 0.0}),
([[1.0], [2.0], [3.0], [4.0]], default_weights, {"avg": 2.5, "n": 4,
"variance": 5.0/3.0}),
([[1.0], [2.0], [3.0], [4.0]], linear_weights, {"avg": 2.0,
"n": 4,
"variance": 2.0}),
# trickier cases (multiple data per revision)
([[1.0, 3.0], [4.0, 4.0]], default_weights, {"avg": 3.0, "n": 4,
"variance": 2.0}),
([[2.0, 3.0], [4.0, 4.0]], linear_weights, {"avg": 3.0, "n": 4,
"variance": 1.0}),
])
@pytest.mark.parametrize(
("revision_data", "weight_fn", "expected"),
[
# common-cases (one revision, one value)
([], default_weights, {"avg": 0.0, "n": 0, "variance": 0.0}),
([[3.0]], default_weights, {"avg": 3.0, "n": 1, "variance": 0.0}),
(
[[1.0], [2.0], [3.0], [4.0]],
default_weights,
{"avg": 2.5, "n": 4, "variance": 5.0 / 3.0},
),
([[1.0], [2.0], [3.0], [4.0]], linear_weights, {"avg": 2.0, "n": 4, "variance": 2.0}),
# trickier cases (multiple data per revision)
([[1.0, 3.0], [4.0, 4.0]], default_weights, {"avg": 3.0, "n": 4, "variance": 2.0}),
([[2.0, 3.0], [4.0, 4.0]], linear_weights, {"avg": 3.0, "n": 4, "variance": 1.0}),
],
)
def test_analyze_fn(revision_data, weight_fn, expected):
data = [RevisionDatum(i, i, values) for (i, values) in zip(
range(len(revision_data)), revision_data)]
data = [
RevisionDatum(i, i, values) for (i, values) in zip(range(len(revision_data)), revision_data)
]
assert analyze(data, weight_fn) == expected


@@ -38,27 +42,32 @@ def test_weights():
assert [linear_weights(i, 5) for i in range(5)] == [1.0, 0.8, 0.6, 0.4, 0.2]


@pytest.mark.parametrize(("old_data", "new_data", "expected"), [
([0.0, 0.0], [1.0, 2.0], 3.0),
([0.0, 0.0], [0.0, 0.0], 0.0),
([0.0, 0.0], [1.0, 1.0], float('inf'))
])
@pytest.mark.parametrize(
("old_data", "new_data", "expected"),
[
([0.0, 0.0], [1.0, 2.0], 3.0),
([0.0, 0.0], [0.0, 0.0], 0.0),
([0.0, 0.0], [1.0, 1.0], float('inf')),
],
)
def test_calc_t(old_data, new_data, expected):
assert calc_t([RevisionDatum(0, 0, old_data)],
[RevisionDatum(1, 1, new_data)]) == expected
assert calc_t([RevisionDatum(0, 0, old_data)], [RevisionDatum(1, 1, new_data)]) == expected


def test_detect_changes():
data = []

times = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
values = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
values = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
for (t, v) in zip(times, values):
data.append(RevisionDatum(t, t, [float(v)]))

result = [(d.push_timestamp, d.change_detected) for d in
detect_changes(data, min_back_window=5, max_back_window=5,
fore_window=5, t_threshold=2)]
result = [
(d.push_timestamp, d.change_detected)
for d in detect_changes(
data, min_back_window=5, max_back_window=5, fore_window=5, t_threshold=2
)
]
assert result == [
(0, False),
(1, False),

@@ -75,7 +84,7 @@ def test_detect_changes():
(12, False),
(13, False),
(14, False),
(15, False)
(15, False),
]


@@ -84,27 +93,33 @@ def test_detect_changes_few_revisions_many_values():
Tests that we correctly detect a regression with
a small number of revisions but a large number of values
'''
data = [RevisionDatum(0, 0, [0]*50+[1]*30),
RevisionDatum(1, 1, [0]*10+[1]*30),
RevisionDatum(1, 1, [0]*10+[1]*30)]
result = [(d.push_timestamp, d.change_detected) for d in
detect_changes(data, min_back_window=5, max_back_window=10,
fore_window=5, t_threshold=2)]
data = [
RevisionDatum(0, 0, [0] * 50 + [1] * 30),
RevisionDatum(1, 1, [0] * 10 + [1] * 30),
RevisionDatum(1, 1, [0] * 10 + [1] * 30),
]
result = [
(d.push_timestamp, d.change_detected)
for d in detect_changes(
data, min_back_window=5, max_back_window=10, fore_window=5, t_threshold=2
)
]

assert result == [(0, False),
(1, True),
(1, False)]
assert result == [(0, False), (1, True), (1, False)]


@pytest.mark.parametrize(("filename", "expected_timestamps"), [
('runs1.json', [1365019665]),
('runs2.json', [1357704596, 1358971894, 1365014104]),
('runs3.json', [1335293827, 1338839958]),
('runs4.json', [1364922838]),
('runs5.json', []),
('a11y.json', [1366197637, 1367799757]),
('tp5rss.json', [1372846906, 1373413365, 1373424974])
])
@pytest.mark.parametrize(
("filename", "expected_timestamps"),
[
('runs1.json', [1365019665]),
('runs2.json', [1357704596, 1358971894, 1365014104]),
('runs3.json', [1335293827, 1338839958]),
('runs4.json', [1364922838]),
('runs5.json', []),
('a11y.json', [1366197637, 1367799757]),
('tp5rss.json', [1372846906, 1373413365, 1373424974]),
],
)
def test_detect_changes_historical_data(filename, expected_timestamps):
"""Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
# Configuration for Analyzer

@@ -117,11 +132,12 @@ def test_detect_changes_historical_data(filename, expected_timestamps):
runs = payload['test_runs']
data = [RevisionDatum(r[2], r[2], [r[3]]) for r in runs]

results = detect_changes(data,
min_back_window=MIN_BACK_WINDOW,
max_back_window=MAX_BACK_WINDOW,
fore_window=FORE_WINDOW,
t_threshold=THRESHOLD)
regression_timestamps = [d.push_timestamp for d in results if
d.change_detected]
results = detect_changes(
data,
min_back_window=MIN_BACK_WINDOW,
max_back_window=MAX_BACK_WINDOW,
fore_window=FORE_WINDOW,
t_threshold=THRESHOLD,
)
regression_timestamps = [d.push_timestamp for d in results if d.change_detected]
assert regression_timestamps == expected_timestamps

@@ -8,7 +8,8 @@ from treeherder.services.taskcluster import TaskclusterModel
# BackfillTool
def test_backfilling_job_from_try_repo_raises_exception(job_from_try):
backfill_tool = BackfillTool(
TaskclusterModel('https://fakerooturl.org', 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN'))
TaskclusterModel('https://fakerooturl.org', 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
)

with pytest.raises(CannotBackfill):
backfill_tool.backfill_job(job_from_try.id)

@@ -6,13 +6,15 @@ from treeherder.model.models import Push
from treeherder.push_health.builds import get_build_failures


def test_get_build_failures_with_parent(failure_classifications, test_push, test_repository, sample_data, mock_log_parser):
def test_get_build_failures_with_parent(
failure_classifications, test_push, test_repository, sample_data, mock_log_parser
):
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
parent_push = Push.objects.create(
revision=parent_revision,
repository=test_repository,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

jobs = sample_data.job_data[20:25]

@@ -20,14 +20,17 @@ def test_intermittent_win7_reftest():
assert failures[0]['suggestedClassification'] == 'intermittent'


@pytest.mark.parametrize(('history', 'confidence', 'classification', 'fcid'), [
({'foo': {'bing': {'baz': 2}}}, 100, 'intermittent', 1),
({'foo': {'bing': {'bee': 2}}}, 75, 'intermittent', 1),
({'foo': {'bee': {'bee': 2}}}, 50, 'intermittent', 1),
({'fee': {'bee': {'bee': 2}}}, 0, 'New Failure', 1),
# no match, but job has been classified as intermittent by hand.
({'fee': {'bee': {'bee': 2}}}, 100, 'intermittent', 4),
])
@pytest.mark.parametrize(
('history', 'confidence', 'classification', 'fcid'),
[
({'foo': {'bing': {'baz': 2}}}, 100, 'intermittent', 1),
({'foo': {'bing': {'bee': 2}}}, 75, 'intermittent', 1),
({'foo': {'bee': {'bee': 2}}}, 50, 'intermittent', 1),
({'fee': {'bee': {'bee': 2}}}, 0, 'New Failure', 1),
# no match, but job has been classified as intermittent by hand.
({'fee': {'bee': {'bee': 2}}}, 100, 'intermittent', 4),
],
)
def test_intermittent_confidence(history, confidence, classification, fcid):
"""test that a failed test is classified as intermittent, confidence 100"""
failures = [

@@ -3,8 +3,7 @@ import datetime
import responses

from treeherder.model.models import Push
from treeherder.push_health.compare import (get_commit_history,
get_response_object)
from treeherder.push_health.compare import get_commit_history, get_response_object


def test_get_response_object(test_push, test_repository):

@@ -23,32 +22,37 @@ def test_get_commit_history_automationrelevance(test_push, test_repository):
revision=parent_revision,
repository=test_repository,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

autorel_commits = {'changesets': [
{
'author': 'Cheech Marin <cheech.marin@gmail.com>', 'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
{
'author': 'libmozevent <release-mgmt-analysis@mozilla.com>',
'desc': 'try_task_config for https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '18f68eb12ebbd88fe3a4fc3afe7df6529a0153fb',
'parents': ['3ca259f9cbdea763e64f10e286e58b271d89ab9d'],
}
], 'visible': True}
autorel_commits = {
'changesets': [
{
'author': 'Cheech Marin <cheech.marin@gmail.com>',
'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
{
'author': 'libmozevent <release-mgmt-analysis@mozilla.com>',
'desc': 'try_task_config for https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '18f68eb12ebbd88fe3a4fc3afe7df6529a0153fb',
'parents': ['3ca259f9cbdea763e64f10e286e58b271d89ab9d'],
},
],
'visible': True,
}

autorel_url = 'https://hg.mozilla.org/{}/json-automationrelevance/{}'.format(
test_repository.name, test_revision)
test_repository.name, test_revision
)
responses.add(
responses.GET,
autorel_url,
json=autorel_commits,
content_type='application/json',
status=200
status=200,
)

history = get_commit_history(test_repository, test_revision, test_push)

@@ -58,37 +62,38 @@ def test_get_commit_history_automationrelevance(test_push, test_repository):


@responses.activate
def test_get_commit_history_parent_different_repo(
test_push,
test_repository,
test_repository_2
):
def test_get_commit_history_parent_different_repo(test_push, test_repository, test_repository_2):
test_revision = '4c45a777949168d16c03a4cba167678b7ab65f76'
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
Push.objects.create(
revision=parent_revision,
repository=test_repository_2,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

autorel_commits = {'changesets': [
{
'author': 'Cheech Marin <cheech.marin@gmail.com>', 'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
], 'visible': True}
autorel_commits = {
'changesets': [
{
'author': 'Cheech Marin <cheech.marin@gmail.com>',
'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
],
'visible': True,
}

autorel_url = 'https://hg.mozilla.org/{}/json-automationrelevance/{}'.format(
test_repository.name, test_revision)
test_repository.name, test_revision
)
responses.add(
responses.GET,
autorel_url,
json=autorel_commits,
content_type='application/json',
status=200
status=200,
)

history = get_commit_history(test_repository, test_revision, test_push)

@@ -105,37 +110,37 @@ def test_get_commit_history_json_pushes(test_push, test_repository):
revision=parent_revision,
repository=test_repository,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

autorel_url = 'https://hg.mozilla.org/{}/json-automationrelevance/{}'.format(
test_repository.name, test_revision)
responses.add(
responses.GET,
autorel_url,
json={},
content_type='application/json',
status=500
test_repository.name, test_revision
)
responses.add(responses.GET, autorel_url, json={}, content_type='application/json', status=500)

jsonpushes_commits = {
'pushes': {'108872': {'changesets': [
{
'author': 'Hiro Protagonist <hprotagonist@gmail.com>',
'desc': 'Bug 1617666 - Use a separate Debugger to improve performance of eval.',
'node': '4fb5e268cf7440332e917e431f14e8bb6dc41a0d',
'parents': [parent_revision],
'pushes': {
'108872': {
'changesets': [
{
'author': 'Hiro Protagonist <hprotagonist@gmail.com>',
'desc': 'Bug 1617666 - Use a separate Debugger to improve performance of eval.',
'node': '4fb5e268cf7440332e917e431f14e8bb6dc41a0d',
'parents': [parent_revision],
}
]
}
]}}
}
}
commits_url = '{}/json-pushes?version=2&full=1&changeset={}'.format(
test_repository.url, test_revision)
test_repository.url, test_revision
)
responses.add(
responses.GET,
commits_url,
json=jsonpushes_commits,
content_type='application/json',
status=200
status=200,
)

history = get_commit_history(test_repository, test_revision, test_push)

@@ -150,17 +155,27 @@ def test_get_commit_history_not_found(test_push, test_repository):
# Does not exist as a Push in the DB.
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
commits_url = '{}/json-pushes?version=2&full=1&changeset={}'.format(
test_repository.url, test_revision)
commits = {'pushes': {1: {'changesets': [
{
'author': 'Boris Chiou <boris.chiou@gmail.com>', 'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
]}}}
test_repository.url, test_revision
)
commits = {
'pushes': {
1: {
'changesets': [
{
'author': 'Boris Chiou <boris.chiou@gmail.com>',
'backsoutnodes': [],
'desc': 'Bug 1612891 - Suppress parsing easing error in early returns of ConvertKeyframeSequence.\n\nWe add a stack based class and supress the exception of parsing easing\nin the destructor, to avoid hitting the potential assertions.\n\nDifferential Revision: https://phabricator.services.mozilla.com/D64268\nDifferential Diff: PHID-DIFF-c4e7dcfpalwiem7bxsnk',
'node': '3ca259f9cbdea763e64f10e286e58b271d89ab9d',
'parents': [parent_revision],
},
]
}
}
}

responses.add(responses.GET, commits_url, json=commits, content_type='application/json', status=200)
responses.add(
responses.GET, commits_url, json=commits, content_type='application/json', status=200
)

parent = get_commit_history(test_repository, test_revision, test_push)
assert parent['parentSha'] == parent_revision

@@ -6,24 +6,28 @@ from treeherder.model.models import Push
from treeherder.push_health.linting import get_lint_failures


def test_get_linting_failures_with_parent(failure_classifications, test_push, test_repository, sample_data, mock_log_parser):
def test_get_linting_failures_with_parent(
failure_classifications, test_push, test_repository, sample_data, mock_log_parser
):
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
parent_push = Push.objects.create(
revision=parent_revision,
repository=test_repository,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

jobs = sample_data.job_data[20:22]

for blob in jobs:
blob['revision'] = test_push.revision
blob['job'].update({
'result': 'testfailed',
'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
'taskcluster_retry_id': '0'
})
blob['job'].update(
{
'result': 'testfailed',
'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
'taskcluster_retry_id': '0',
}
)
blob['job']['machine_platform']['platform'] = 'lint'
store_job_data(test_repository, jobs)

@@ -2,16 +2,9 @@ import datetime

import pytest

from tests.autoclassify.utils import (create_lines,
test_line)
from treeherder.model.models import (FailureLine,
Job,
Push,
Repository,
TaskclusterMetadata)
from treeherder.push_health.tests import (get_test_failures,
has_job,
has_line)
from tests.autoclassify.utils import create_lines, test_line
from treeherder.model.models import FailureLine, Job, Push, Repository, TaskclusterMetadata
from treeherder.push_health.tests import get_test_failures, has_job, has_line


@pytest.mark.parametrize(('find_it',), [(True,), (False,)])

@@ -44,10 +37,9 @@ def test_has_line(find_it):
assert not has_line(line, line_list)


def test_get_test_failures_no_parent(failure_classifications,
test_repository,
test_job,
text_log_error_lines):
def test_get_test_failures_no_parent(
failure_classifications, test_repository, test_job, text_log_error_lines
):
test_job.result = 'testfailed'
test_job.save()
print(test_job.taskcluster_metadata.task_id)

@@ -61,11 +53,9 @@ def test_get_test_failures_no_parent(failure_classifications,
assert not need_investigation[0]['failedInParent']


def test_get_test_failures_with_parent(failure_classifications,
test_repository,
test_job,
mock_log_parser,
text_log_error_lines):
def test_get_test_failures_with_parent(
failure_classifications, test_repository, test_job, mock_log_parser, text_log_error_lines
):
test_job.result = 'testfailed'
test_job.save()


@@ -73,18 +63,14 @@ def test_get_test_failures_with_parent(failure_classifications,
revision='abcdef77949168d16c03a4cba167678b7ab65f76',
repository=test_repository,
author='foo@bar.baz',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)
parent_job = Job.objects.first()
parent_job.pk = None
parent_job.push = parent_push
parent_job.guid = 'wazzon chokey?'
parent_job.save()
TaskclusterMetadata.objects.create(
job=parent_job,
task_id='V3SVuxO8TFy37En_6HcXLs',
retry_id=0
)
TaskclusterMetadata.objects.create(job=parent_job, task_id='V3SVuxO8TFy37En_6HcXLs', retry_id=0)

create_lines(parent_job, [(test_line, {})])

@@ -7,9 +7,7 @@ import responses

from treeherder.config import settings
from treeherder.model.models import Push
from treeherder.push_health.usage import (get_latest,
get_peak,
get_usage)
from treeherder.push_health.usage import get_latest, get_peak, get_usage


@pytest.fixture

@@ -35,15 +33,18 @@ def test_latest(push_usage):
@responses.activate
def test_get_usage(push_usage, test_repository):
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
'try', 'treeherder-prod')
'try', 'treeherder-prod'
)
new_relic_url = '{}?nrql={}'.format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)

responses.add(
responses.GET,
new_relic_url,
body=json.dumps(push_usage),
status=200, content_type='application/json',
match_querystring=True)
status=200,
content_type='application/json',
match_querystring=True,
)

# create the Pushes that match the usage response
for rev in [

@@ -56,7 +57,7 @@ def test_get_usage(push_usage, test_repository):
revision=rev,
repository=test_repository,
author='phydeaux@dog.org',
time=datetime.datetime.now()
time=datetime.datetime.now(),
)

usage = get_usage()

@@ -1,61 +1,93 @@
import pytest

from treeherder.push_health.utils import (clean_config,
clean_platform,
clean_test,
is_valid_failure_line)
from treeherder.push_health.utils import (
clean_config,
clean_platform,
clean_test,
is_valid_failure_line,
)


@pytest.mark.parametrize(('action', 'test', 'signature', 'message', 'expected'), [
('test_result', 'dis/dat/da/odder/ting', 'sig', 'mess', 'dis/dat/da/odder/ting'),
('crash', 'dis/dat/da/odder/ting', 'sig', 'mess', 'sig'),
('log', 'dis/dat/da/odder/ting', 'sig', 'mess', 'mess'),
('meh', 'dis/dat/da/odder/ting', 'sig', 'mess', 'Non-Test Error'),
('test_result', 'pid:dis/dat/da/odder/ting', 'sig', 'mess', None),
('test_result', 'tests/layout/this == tests/layout/that', 'sig', 'mess', 'layout/this == layout/that'),
('test_result', 'tests/layout/this != tests/layout/that', 'sig', 'mess', 'layout/this != layout/that'),
('test_result', 'build/tests/reftest/tests/this != build/tests/reftest/tests/that', 'sig', 'mess', 'this != that'),
('test_result', 'http://10.0.5.5/tests/this != http://10.0.5.5/tests/that', 'sig', 'mess', 'this != that'),
('test_result', 'build/tests/reftest/tests/this', 'sig', 'mess', 'this'),
('test_result', 'test=jsreftest.html', 'sig', 'mess', 'jsreftest.html'),
('test_result', 'http://10.0.5.5/tests/this/thing', 'sig', 'mess', 'this/thing'),
('test_result', 'http://localhost:5000/tests/this/thing', 'sig', 'mess', 'thing'),
('test_result', 'thing is done (finished)', 'sig', 'mess', 'thing is done'),
('test_result', 'Last test finished', 'sig', 'mess', None),
('test_result', '(SimpleTest/TestRunner.js)', 'sig', 'mess', None),
('test_result', '/this\\thing\\there', 'sig', 'mess', 'this/thing/there'),
])
@pytest.mark.parametrize(
('action', 'test', 'signature', 'message', 'expected'),
[
('test_result', 'dis/dat/da/odder/ting', 'sig', 'mess', 'dis/dat/da/odder/ting'),
('crash', 'dis/dat/da/odder/ting', 'sig', 'mess', 'sig'),
('log', 'dis/dat/da/odder/ting', 'sig', 'mess', 'mess'),
('meh', 'dis/dat/da/odder/ting', 'sig', 'mess', 'Non-Test Error'),
('test_result', 'pid:dis/dat/da/odder/ting', 'sig', 'mess', None),
(
'test_result',
'tests/layout/this == tests/layout/that',
'sig',
'mess',
'layout/this == layout/that',
),
(
'test_result',
'tests/layout/this != tests/layout/that',
'sig',
'mess',
'layout/this != layout/that',
),
(
'test_result',
'build/tests/reftest/tests/this != build/tests/reftest/tests/that',
'sig',
'mess',
'this != that',
),
(
'test_result',
'http://10.0.5.5/tests/this != http://10.0.5.5/tests/that',
'sig',
'mess',
'this != that',
),
('test_result', 'build/tests/reftest/tests/this', 'sig', 'mess', 'this'),
('test_result', 'test=jsreftest.html', 'sig', 'mess', 'jsreftest.html'),
('test_result', 'http://10.0.5.5/tests/this/thing', 'sig', 'mess', 'this/thing'),
('test_result', 'http://localhost:5000/tests/this/thing', 'sig', 'mess', 'thing'),
('test_result', 'thing is done (finished)', 'sig', 'mess', 'thing is done'),
('test_result', 'Last test finished', 'sig', 'mess', None),
('test_result', '(SimpleTest/TestRunner.js)', 'sig', 'mess', None),
('test_result', '/this\\thing\\there', 'sig', 'mess', 'this/thing/there'),
],
)
def test_clean_test(action, test, signature, message, expected):
assert expected == clean_test(action, test, signature, message)


@pytest.mark.parametrize(('config', 'expected'), [
('opt', 'opt'),
('debug', 'debug'),
('asan', 'asan'),
('pgo', 'opt'),
('shippable', 'opt'),
])
@pytest.mark.parametrize(
('config', 'expected'),
[('opt', 'opt'), ('debug', 'debug'), ('asan', 'asan'), ('pgo', 'opt'), ('shippable', 'opt'),],
)
def test_clean_config(config, expected):
assert expected == clean_config(config)


@pytest.mark.parametrize(('platform', 'expected'), [
('macosx64 opt and such', 'osx-10-10 opt and such'),
('linux doohickey', 'linux doohickey'),
('windows gizmo', 'windows gizmo'),
])
@pytest.mark.parametrize(
('platform', 'expected'),
[
('macosx64 opt and such', 'osx-10-10 opt and such'),
('linux doohickey', 'linux doohickey'),
('windows gizmo', 'windows gizmo'),
],
)
def test_clean_platform(platform, expected):
assert expected == clean_platform(platform)


@pytest.mark.parametrize(('line', 'expected'), [
('Return code:', False),
('unexpected status', False),
('unexpected crashes', False),
('exit status', False),
('Finished in', False),
('expect magic', True),
])
@pytest.mark.parametrize(
('line', 'expected'),
[
('Return code:', False),
('unexpected status', False),
('unexpected crashes', False),
('exit status', False),
('Finished in', False),
('expect magic', True),
],
)
def test_is_valid_failure_line(line, expected):
assert expected == is_valid_failure_line(line)

@@ -8,45 +8,25 @@ from datetime import timedelta

def job_data(**kwargs):
jobs_obj = {
"revision": kwargs.get("revision",
"24fd64b8251fac5cf60b54a915bffa7e51f636b5"),
"revision": kwargs.get("revision", "24fd64b8251fac5cf60b54a915bffa7e51f636b5"),
"job": {

u"build_platform": build_platform(**kwargs.pop("build_platform", {})),

u"submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()),

u"start_timestamp": kwargs.pop("start_timestamp", start_timestamp()),

u"name": kwargs.pop("name", u"mochitest-5"),

u"option_collection": option_collection(
**kwargs.pop("option_collection", {})),

u"option_collection": option_collection(**kwargs.pop("option_collection", {})),
u"log_references": log_references(kwargs.pop("log_references", [])),

u"who": kwargs.pop("who", u"sendchange-unittest"),

u"reason": kwargs.pop("reason", u"scheduler"),

u"artifact": kwargs.pop("artifact", {}),

u"machine_platform": machine_platform(
**kwargs.pop("machine_platform", {})),

u"machine_platform": machine_platform(**kwargs.pop("machine_platform", {})),
u"machine": kwargs.pop("machine", u"talos-r3-xp-088"),

u"state": kwargs.pop("state", u"completed"),

u"result": kwargs.pop("result", 0),

u"job_guid": kwargs.pop(
u"job_guid", u"f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),

u"job_guid": kwargs.pop(u"job_guid", u"f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),
u"product_name": kwargs.pop("product_name", u"firefox"),

u"end_timestamp": kwargs.pop("end_timestamp", end_timestamp()),
}
},
}

# defaults.update(kwargs)

@@ -55,9 +35,7 @@ def job_data(**kwargs):


def to_seconds(td):
return (td.microseconds +
(td.seconds + td.days * 24 * 3600) * 10 ** 6
) / 10 ** 6
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6


def get_timestamp_days_ago(days_ago):

@@ -85,9 +63,7 @@ def option_collection(**kwargs):
Return a sample data structure, with default values.

"""
defaults = {
u"debug": True
}
defaults = {u"debug": True}

defaults.update(kwargs)


@@ -96,12 +72,7 @@ def option_collection(**kwargs):

def log_references(log_refs=None):
if not log_refs:
log_refs = [
{
u"url": u"http://ftp.mozilla.org/pub/...",
u"name": u"unittest"
}
]
log_refs = [{u"url": u"http://ftp.mozilla.org/pub/...", u"name": u"unittest"}]
return log_refs


@ -3,72 +3,92 @@ import os

class SampleData:
    @classmethod
    def get_perf_data(cls, filename):
        with open("{0}/sample_data/artifacts/performance/{1}".format(
                os.path.dirname(__file__), filename)) as f:
        with open(
            "{0}/sample_data/artifacts/performance/{1}".format(os.path.dirname(__file__), filename)
        ) as f:
            return json.load(f)

    def __init__(self):
        self.job_data_file = "{0}/sample_data/job_data.txt".format(
            os.path.dirname(__file__)
        )
        self.job_data_file = "{0}/sample_data/job_data.txt".format(os.path.dirname(__file__))

        self.push_data_file = "{0}/sample_data/push_data.json".format(
            os.path.dirname(__file__)
        )
        self.push_data_file = "{0}/sample_data/push_data.json".format(os.path.dirname(__file__))

        self.logs_dir = "{0}/sample_data/logs".format(
            os.path.dirname(__file__)
        )
        self.logs_dir = "{0}/sample_data/logs".format(os.path.dirname(__file__))

        with open("{0}/sample_data/artifacts/text_log_summary.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/artifacts/text_log_summary.json".format(os.path.dirname(__file__))
        ) as f:
            self.text_log_summary = json.load(f)

        with open("{0}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.taskcluster_pulse_messages = json.load(f)

        with open("{0}/sample_data/pulse_consumer/taskcluster_tasks.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/taskcluster_tasks.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.taskcluster_tasks = json.load(f)

        with open("{0}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.taskcluster_transformed_jobs = json.load(f)

        with open("{0}/sample_data/pulse_consumer/job_data.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/job_data.json".format(os.path.dirname(__file__))
        ) as f:
            self.pulse_jobs = json.load(f)

        with open("{0}/sample_data/pulse_consumer/transformed_job_data.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/transformed_job_data.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.transformed_pulse_jobs = json.load(f)

        with open("{0}/sample_data/pulse_consumer/github_push.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/github_push.json".format(os.path.dirname(__file__))
        ) as f:
            self.github_push = json.load(f)

        with open("{0}/sample_data/pulse_consumer/transformed_gh_push.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/transformed_gh_push.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.transformed_github_push = json.load(f)

        with open("{0}/sample_data/pulse_consumer/github_pr.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/github_pr.json".format(os.path.dirname(__file__))
        ) as f:
            self.github_pr = json.load(f)

        with open("{0}/sample_data/pulse_consumer/transformed_gh_pr.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/transformed_gh_pr.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.transformed_github_pr = json.load(f)

        with open("{0}/sample_data/pulse_consumer/hg_push.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/hg_push.json".format(os.path.dirname(__file__))
        ) as f:
            self.hg_push = json.load(f)

        with open("{0}/sample_data/pulse_consumer/transformed_hg_push.json".format(
                os.path.dirname(__file__))) as f:
        with open(
            "{0}/sample_data/pulse_consumer/transformed_hg_push.json".format(
                os.path.dirname(__file__)
            )
        ) as f:
            self.transformed_hg_push = json.load(f)

        self.job_data = []

@ -1,10 +1,8 @@
from pypom import (Page,
                   Region)
from pypom import Page, Region
from selenium.webdriver.common.by import By


class Base(Page):
    @property
    def header(self):
        return self.Header(self)

@ -21,11 +19,9 @@ class Base(Page):
        # Initially try to compare with the text of the menu item.
        # But if there's an image instead of just text, then compare the
        # ``alt`` property of the image instead.
        self.wait.until(lambda _: self.is_element_displayed(
            *self._app_menu_locator))
        self.wait.until(lambda _: self.is_element_displayed(*self._app_menu_locator))
        menu = self.find_element(*self._app_menu_locator).text
        return menu if menu else self.find_element(
            *self._app_logo_locator).get_attribute("alt")
        return menu if menu else self.find_element(*self._app_logo_locator).get_attribute("alt")

    def switch_app(self):
        self.find_element(*self._app_menu_locator).click()

@ -14,6 +14,7 @@ class Perfherder(Base):
    def switch_to_treeherder(self):
        self.header.switch_app()
        from pages.treeherder import Treeherder

        return Treeherder(self.driver, self.base_url).wait_for_page_to_load()

    class GraphTooltip(Region):

@ -43,19 +43,16 @@ class Treeherder(Base):

    @property
    def active_watched_repo(self):
        self.wait.until(lambda _: self.is_element_displayed(
            *self._active_watched_repo_locator))
        self.wait.until(lambda _: self.is_element_displayed(*self._active_watched_repo_locator))
        return self.find_element(*self._active_watched_repo_locator).text

    @property
    def all_jobs(self):
        return list(itertools.chain.from_iterable(
            r.jobs for r in self.pushes))
        return list(itertools.chain.from_iterable(r.jobs for r in self.pushes))

    @property
    def all_job_groups(self):
        return list(itertools.chain.from_iterable(
            r.job_groups for r in self.pushes))
        return list(itertools.chain.from_iterable(r.job_groups for r in self.pushes))

    def clear_filter(self, method='pointer'):
        if method == 'keyboard':

@ -139,6 +136,7 @@ class Treeherder(Base):
    def switch_to_perfherder(self):
        self.header.switch_app()
        from pages.perfherder import Perfherder

        return Perfherder(self.driver, self.base_url).wait_for_page_to_load()

    def toggle_failures(self):

@ -237,7 +235,10 @@ class Treeherder(Base):

        @property
        def job_groups(self):
            return [self.JobGroup(self.page, root=el) for el in self.find_elements(*self._job_groups_locator)]
            return [
                self.JobGroup(self.page, root=el)
                for el in self.find_elements(*self._job_groups_locator)
            ]

        @property
        def jobs(self):

@ -245,7 +246,9 @@ class Treeherder(Base):

        @property
        def commits(self):
            return [self.page.Commit(self.page, el) for el in self.find_elements(*self._commits_locator)]
            return [
                self.page.Commit(self.page, el) for el in self.find_elements(*self._commits_locator)
            ]

        def filter_by_author(self):
            self.find_element(*self._author_locator).click()

@ -270,7 +273,6 @@ class Treeherder(Base):
            self.page.wait_for_page_to_load()

    class Job(Region):
        def click(self):
            self.root.click()
            self.wait.until(lambda _: self.page.details_panel.is_open)

@ -299,7 +301,10 @@ class Treeherder(Base):

        @property
        def jobs(self):
            return [Treeherder.ResultSet.Job(self.page, root=el) for el in self.find_elements(*self._jobs_locator)]
            return [
                Treeherder.ResultSet.Job(self.page, root=el)
                for el in self.find_elements(*self._jobs_locator)
            ]

    class Commit(Region):

@ -334,9 +339,11 @@ class Treeherder(Base):

        @property
        def is_open(self):
            return self.root.is_displayed() and \
                not self.find_elements(*self._loading_locator) and \
                self.job_details.result
            return (
                self.root.is_displayed()
                and not self.find_elements(*self._loading_locator)
                and self.job_details.result
            )

        @property
        def job_details(self):

@ -345,7 +352,10 @@ class Treeherder(Base):
    class SummaryPanel(Region):
        _root_locator = (By.ID, 'summary-panel')
        _keywords_locator = (By.CSS_SELECTOR, 'a[title="Filter jobs containing these keywords"]')
        _keywords_locator = (
            By.CSS_SELECTOR,
            'a[title="Filter jobs containing these keywords"]',
        )
        _log_viewer_locator = (By.CLASS_NAME, 'logviewer-btn')
        _result_locator = (By.CSS_SELECTOR, '#result-status-pane div:nth-of-type(1) span')

@ -371,4 +381,5 @@ class Treeherder(Base):
            self.driver.switch_to.window(handles[0])

            from pages.log_viewer import LogViewer

            return LogViewer(self.driver).wait_for_page_to_load()

@ -9,10 +9,7 @@ def test_jobs(eleven_job_blobs, create_jobs):
    job_blobs = []
    for guid in [b['job']['job_guid'] for b in eleven_job_blobs]:
        job = copy.deepcopy(eleven_job_blobs[0])
        job['job'].update({
            'job_guid': guid,
            'job_symbol': 'job',
            'group_symbol': 'Group'})
        job['job'].update({'job_guid': guid, 'job_symbol': 'job', 'group_symbol': 'Group'})
        job_blobs.append(job)
    return create_jobs(job_blobs)

@ -14,10 +14,8 @@ def test_job(eleven_job_blobs, create_jobs):
@pytest.fixture(name='log')
def fixture_log(test_job):
    return JobLog.objects.create(
        job=test_job,
        name='log1',
        url='https://example.com',
        status=JobLog.PARSED)
        job=test_job, name='log1', url='https://example.com', status=JobLog.PARSED
    )


def test_open_log_viewer(base_url, selenium, log):

@ -8,7 +8,7 @@ RESULTS = ['testfailed', 'busted', 'exception']
def test_jobs(eleven_job_blobs, create_jobs):
    for i, status in enumerate(RESULTS):
        eleven_job_blobs[i]['job']['result'] = status
    return create_jobs(eleven_job_blobs[0:len(RESULTS)])
    return create_jobs(eleven_job_blobs[0 : len(RESULTS)])
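
The added spaces around the slice colon come from black following PEP 8: when a slice bound is an expression rather than a simple name or literal, the colon is treated like an operator and gets symmetric spacing. A small illustration, not from this commit:

    items = list(range(10))
    n = 3
    simple = items[0:n]  # simple bounds keep the compact form
    complex_bound = items[0 : len(items) - n]  # expression bound gets spaces
    assert simple == [0, 1, 2]
    assert complex_bound == items[0:7]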


def test_select_next_unclassified_job(base_url, selenium, test_jobs):

@ -2,15 +2,13 @@ import pytest
from django.conf import settings

from tests.conftest import IS_WINDOWS
from treeherder.services.pulse.consumers import (Consumers,
                                                 PulseConsumer)
from treeherder.services.pulse.consumers import Consumers, PulseConsumer

from .utils import create_and_destroy_exchange


def test_Consumers():
    class TestConsumer:
        def prepare(self):
            self.prepared = True

@ -41,5 +39,11 @@ def test_PulseConsumer(pulse_connection):
            pass

    with create_and_destroy_exchange(pulse_connection, "foobar"):
        cons = TestConsumer({"root_url": "https://firefox-ci-tc.services.mozilla.com", "pulse_url": settings.CELERY_BROKER_URL}, None)
        cons = TestConsumer(
            {
                "root_url": "https://firefox-ci-tc.services.mozilla.com",
                "pulse_url": settings.CELERY_BROKER_URL,
            },
            None,
        )
        cons.prepare()

@ -1,14 +1,11 @@
import json
from os.path import (dirname,
                     join)
from os.path import dirname, join

import pytest

from treeherder.services.taskcluster import TaskclusterModel

SAMPLE_DATA_PATH = join(
    dirname(dirname(__file__)),
    'sample_data')
SAMPLE_DATA_PATH = join(dirname(dirname(__file__)), 'sample_data')


def load_json_fixture(from_file):

@ -18,38 +15,43 @@ def load_json_fixture(from_file):


@pytest.fixture(scope="module")
def actions_json(): return load_json_fixture('initialActions.json')
def actions_json():
    return load_json_fixture('initialActions.json')


@pytest.fixture(scope="module")
def expected_actions_json(): return load_json_fixture('reducedActions.json')
def expected_actions_json():
    return load_json_fixture('reducedActions.json')


@pytest.fixture(scope="module")
def original_task(): return load_json_fixture('originalTask.json')
def original_task():
    return load_json_fixture('originalTask.json')


@pytest.fixture(scope="module")
def expected_backfill_task(): return load_json_fixture('backfilltask.json')
def expected_backfill_task():
    return load_json_fixture('backfilltask.json')


# TaskclusterModel
def test_filter_relevant_actions(actions_json, original_task, expected_actions_json):
    reduced_actions_json = TaskclusterModel._filter_relevant_actions(actions_json,
                                                                     original_task)
    reduced_actions_json = TaskclusterModel._filter_relevant_actions(actions_json, original_task)

    assert reduced_actions_json == expected_actions_json


def test_task_in_context():
    # match
    tag_set_list, task_tags = [load_json_fixture(f)
                               for f in ("matchingTagSetList.json", "matchingTaskTags.json")]
    tag_set_list, task_tags = [
        load_json_fixture(f) for f in ("matchingTagSetList.json", "matchingTaskTags.json")
    ]
    assert TaskclusterModel._task_in_context(tag_set_list, task_tags) is True

    # mismatch
    tag_set_list, task_tags = [load_json_fixture(f)
                               for f in ("mismatchingTagSetList.json", "mismatchingTaskTags.json")]
    tag_set_list, task_tags = [
        load_json_fixture(f) for f in ("mismatchingTagSetList.json", "mismatchingTaskTags.json")
    ]
    assert TaskclusterModel._task_in_context(tag_set_list, task_tags) is False

@ -1,11 +1,9 @@
import pytest

from treeherder.model.models import (Job,
                                     JobNote)
from treeherder.model.models import Job, JobNote
from treeherder.seta.common import job_priority_index
from treeherder.seta.models import JobPriority
from treeherder.seta.settings import (SETA_HIGH_VALUE_PRIORITY,
                                      SETA_LOW_VALUE_PRIORITY)
from treeherder.seta.settings import SETA_HIGH_VALUE_PRIORITY, SETA_LOW_VALUE_PRIORITY
from treeherder.seta.update_job_priority import _sanitize_data


@ -13,38 +11,44 @@ from treeherder.seta.update_job_priority import _sanitize_data
def runnable_jobs_data():
    repository_name = 'test_treeherder_jobs'
    runnable_jobs = [
        {
            "build_system_type": "buildbot",
            "job_type_name": "W3C Web Platform Tests",
            "platform": "windows8-64",
            "platform_option": "debug",
            "ref_data_name": "Windows 8 64-bit {} debug test web-platform-tests-1".format(repository_name),
        }, {
            "build_system_type": "buildbot",
            "job_type_name": "Reftest e10s",
            "platform": "linux32",
            "platform_option": "opt",
            "ref_data_name": "Ubuntu VM 12.04 {} opt test reftest-e10s-1".format(repository_name),
        }, {
            "build_system_type": "buildbot",
            "job_type_name": "Build",
            "platform": "osx-10-7",
            "platform_option": "opt",
            "ref_data_name": "OS X 10.7 {} build".format(repository_name),
        }, {
            "build_system_type": "taskcluster",
            "job_type_name": "test-linux32/opt-reftest-e10s-1",
            "platform": "linux32",
            "platform_option": "opt",
            "ref_data_name": "test-linux32/opt-reftest-e10s-1",
        }, {
            "build_system_type": "taskcluster",
            "job_type_name": "test-linux64/opt-reftest-e10s-2",
            "platform": "linux64",
            "platform_option": "opt",
            "ref_data_name": "test-linux64/opt-reftest-e10s-2",
        }
    ]
        {
            "build_system_type": "buildbot",
            "job_type_name": "W3C Web Platform Tests",
            "platform": "windows8-64",
            "platform_option": "debug",
            "ref_data_name": "Windows 8 64-bit {} debug test web-platform-tests-1".format(
                repository_name
            ),
        },
        {
            "build_system_type": "buildbot",
            "job_type_name": "Reftest e10s",
            "platform": "linux32",
            "platform_option": "opt",
            "ref_data_name": "Ubuntu VM 12.04 {} opt test reftest-e10s-1".format(repository_name),
        },
        {
            "build_system_type": "buildbot",
            "job_type_name": "Build",
            "platform": "osx-10-7",
            "platform_option": "opt",
            "ref_data_name": "OS X 10.7 {} build".format(repository_name),
        },
        {
            "build_system_type": "taskcluster",
            "job_type_name": "test-linux32/opt-reftest-e10s-1",
            "platform": "linux32",
            "platform_option": "opt",
            "ref_data_name": "test-linux32/opt-reftest-e10s-1",
        },
        {
            "build_system_type": "taskcluster",
            "job_type_name": "test-linux64/opt-reftest-e10s-2",
            "platform": "linux64",
            "platform_option": "opt",
            "ref_data_name": "test-linux64/opt-reftest-e10s-2",
        },
    ]

    return runnable_jobs

@ -56,7 +60,7 @@ def tc_latest_gecko_decision_index(test_repository):
        "taskId": "XVDNiP07RNaaEghhvkZJWg",
        "rank": 0,
        "data": {},
        "expires": "2018-01-04T20:36:11.375Z"
        "expires": "2018-01-04T20:36:11.375Z",
    }


@ -82,13 +86,15 @@ def all_job_priorities_stored(job_priority_list):
def job_priority_list(sanitized_data):
    jp_list = []
    for datum in sanitized_data:
        jp_list.append(JobPriority(
            testtype=datum['testtype'],
            buildtype=datum['platform_option'],
            platform=datum['platform'],
            buildsystem=datum['build_system_type'],
            priority=SETA_HIGH_VALUE_PRIORITY,
        ))
        jp_list.append(
            JobPriority(
                testtype=datum['testtype'],
                buildtype=datum['platform_option'],
                platform=datum['platform'],
                buildsystem=datum['build_system_type'],
                priority=SETA_HIGH_VALUE_PRIORITY,
            )
        )
        # Mark the reftest-e10s-2 TC job as low priority (unique to TC)
        if datum['testtype'] == 'reftest-e10s-2':
            jp_list[-1].priority = SETA_LOW_VALUE_PRIORITY

@ -105,8 +111,9 @@ def jp_index_fixture(job_priority_list):


@pytest.fixture
def fifteen_jobs_with_notes(eleven_jobs_stored, taskcluster_jobs_stored, test_user,
                            failure_classifications):
def fifteen_jobs_with_notes(
    eleven_jobs_stored, taskcluster_jobs_stored, test_user, failure_classifications
):
    """provide 15 jobs with job notes."""
    counter = 0
    for job in Job.objects.all():

@ -114,54 +121,56 @@ def fifteen_jobs_with_notes(eleven_jobs_stored, taskcluster_jobs_stored, test_us

        # add 5 valid job notes related to 'this is revision x'
        if counter < 6:
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="this is revision x")
            JobNote.objects.create(
                job=job, failure_classification_id=2, user=test_user, text="this is revision x"
            )
            continue

        # add 3 valid job notes with raw revision 31415926535
        if counter < 9:
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="314159265358")
            JobNote.objects.create(
                job=job, failure_classification_id=2, user=test_user, text="314159265358"
            )
            continue

        # Add 3 job notes with full url to revision, expected to map to 31415926535
        if counter < 12:
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="http://hg.mozilla.org/mozilla-central/314159265358")
            JobNote.objects.create(
                job=job,
                failure_classification_id=2,
                user=test_user,
                text="http://hg.mozilla.org/mozilla-central/314159265358",
            )
            continue

        # Add 1 valid job with trailing slash, expected to map to 31415926535
        if counter < 13:
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="http://hg.mozilla.org/mozilla-central/314159265358/")
            JobNote.objects.create(
                job=job,
                failure_classification_id=2,
                user=test_user,
                text="http://hg.mozilla.org/mozilla-central/314159265358/",
            )
            continue

        # Add 1 job with invalid revision text, expect it to be ignored
        if counter < 14:
            # We will ignore this based on text length
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="too short")
            JobNote.objects.create(
                job=job, failure_classification_id=2, user=test_user, text="too short"
            )
            continue

        # Add 1 job with no revision text, expect it to be ignored
        if counter < 15:
            # We will ignore this based on blank note
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="")
            JobNote.objects.create(job=job, failure_classification_id=2, user=test_user, text="")
            continue

        # Add 1 more job with invalid revision text, expect it to be ignored
        if counter < 16:
            # We will ignore this based on effectively blank note
            JobNote.objects.create(job=job,
                                   failure_classification_id=2,
                                   user=test_user, text="/")
            JobNote.objects.create(job=job, failure_classification_id=2, user=test_user, text="/")
            continue

        # if we have any more jobs defined it will break this test, ignore

@ -186,7 +195,8 @@ def failures_fixed_by_commit():
            (u'b2g_mozilla-release_inari_dep', u'opt', u'b2g-device-image'),
            (u'b2g_mozilla-release_nexus-4_dep', u'opt', u'b2g-device-image'),
            (u'mochitest-devtools-chrome-3', u'debug', u'linux64'),
        ]}
        ],
    }


@pytest.fixture

@ -4,7 +4,9 @@ from treeherder.seta.analyze_failures import get_failures_fixed_by_commit


@pytest.mark.django_db()
def test_analyze_failures(fifteen_jobs_with_notes, failures_fixed_by_commit, patched_seta_fixed_by_commit_repos):
def test_analyze_failures(
    fifteen_jobs_with_notes, failures_fixed_by_commit, patched_seta_fixed_by_commit_repos
):
    ret = get_failures_fixed_by_commit()
    exp = failures_fixed_by_commit

@ -3,23 +3,25 @@ import datetime
import pytest
from mock import patch

from treeherder.seta.job_priorities import (SetaError,
                                            seta_job_scheduling)
from treeherder.seta.job_priorities import SetaError, seta_job_scheduling


@pytest.mark.django_db()
@patch('treeherder.seta.job_priorities.SETAJobPriorities._validate_request', return_value=None)
@patch('treeherder.etl.seta.list_runnable_jobs')
def test_gecko_decision_task(runnable_jobs_list, validate_request,
                             test_repository, runnable_jobs_data,
                             all_job_priorities_stored):
def test_gecko_decision_task(
    runnable_jobs_list,
    validate_request,
    test_repository,
    runnable_jobs_data,
    all_job_priorities_stored,
):
    '''
    When the Gecko decision task calls SETA it will return all jobs that are less likely to catch
    a regression (low value jobs).
    '''
    runnable_jobs_list.return_value = runnable_jobs_data
    jobs = seta_job_scheduling(project=test_repository.name,
                               build_system_type='taskcluster')
    jobs = seta_job_scheduling(project=test_repository.name, build_system_type='taskcluster')
    assert len(jobs['jobtypes'][str(datetime.date.today())]) == 1


@ -31,5 +33,7 @@ def test_gecko_decision_task_invalid_repo():
    with pytest.raises(SetaError) as exception_info:
        seta_job_scheduling(project='mozilla-repo-x', build_system_type='taskcluster')

    assert str(exception_info.value) == "The specified project repo 'mozilla-repo-x' " \
                                        "is not supported by SETA."
    assert (
        str(exception_info.value) == "The specified project repo 'mozilla-repo-x' "
        "is not supported by SETA."
    )

@ -12,22 +12,26 @@ YESTERDAY = timezone.now() - datetime.timedelta(days=1)

# JobPriority tests
def test_expired_job_priority():
    jp = JobPriority(testtype='web-platform-tests-1',
                     buildtype='opt',
                     platform='windows8-64',
                     priority=1,
                     expiration_date=YESTERDAY,
                     buildsystem='taskcluster')
    jp = JobPriority(
        testtype='web-platform-tests-1',
        buildtype='opt',
        platform='windows8-64',
        priority=1,
        expiration_date=YESTERDAY,
        buildsystem='taskcluster',
    )
    assert jp.has_expired()


def test_not_expired_job_priority():
    jp = JobPriority(testtype='web-platform-tests-1',
                     buildtype='opt',
                     platform='windows8-64',
                     priority=1,
                     expiration_date=TOMORROW,
                     buildsystem='taskcluster')
    jp = JobPriority(
        testtype='web-platform-tests-1',
        buildtype='opt',
        platform='windows8-64',
        priority=1,
        expiration_date=TOMORROW,
        buildsystem='taskcluster',
    )
    assert not jp.has_expired()


@ -41,7 +45,8 @@ def test_null_testtype():
        platform='windows8-64',
        priority=1,
        expiration_date=TOMORROW,
        buildsystem='taskcluster')
        buildsystem='taskcluster',
    )


@pytest.mark.django_db()

@ -53,5 +58,6 @@ def test_null_expiration_date():
        platform='windows8-64',
        priority=1,
        expiration_date=None,
        buildsystem='taskcluster')
        buildsystem='taskcluster',
    )
    assert jp.expiration_date is None

@ -2,11 +2,13 @@ import pytest
from mock import patch

from treeherder.seta.models import JobPriority
from treeherder.seta.update_job_priority import (_initialize_values,
                                                 _sanitize_data,
                                                 _unique_key,
                                                 _update_table,
                                                 query_sanitized_data)
from treeherder.seta.update_job_priority import (
    _initialize_values,
    _sanitize_data,
    _unique_key,
    _update_table,
    query_sanitized_data,
)


def test_unique_key():

@ -14,7 +16,7 @@ def test_unique_key():
        'build_system_type': 'buildbot',
        'platform': 'windows8-64',
        'platform_option': 'opt',
        'testtype': 'web-platform-tests-1'
        'testtype': 'web-platform-tests-1',
    }
    assert _unique_key(new_job), ('web-platform-tests-1', 'opt', 'windows8-64')

@ -48,8 +50,7 @@ def test_initialize_values_no_data():

@patch.object(JobPriority, 'save')
@patch('treeherder.seta.update_job_priority._initialize_values')
def test_update_table_empty_table(initial_values, jp_save,
                                  sanitized_data):
def test_update_table_empty_table(initial_values, jp_save, sanitized_data):
    '''
    We test that starting from an empty table
    '''

@ -67,21 +68,31 @@ def test_update_table_job_from_other_buildsysten(all_job_priorities_stored):
        'build_system_type': 'buildbot',
        'platform': 'linux64',
        'platform_option': 'opt',
        'testtype': 'reftest-e10s-2'
        'testtype': 'reftest-e10s-2',
    }
    # Before calling update_table the priority is only for TaskCluster
    assert len(JobPriority.objects.filter(
        buildsystem='taskcluster',
        buildtype=data['platform_option'],
        platform=data['platform'],
        testtype=data['testtype'],
    )) == 1
    assert (
        len(
            JobPriority.objects.filter(
                buildsystem='taskcluster',
                buildtype=data['platform_option'],
                platform=data['platform'],
                testtype=data['testtype'],
            )
        )
        == 1
    )
    # We are checking that only 1 job was updated
    ret_val = _update_table([data])
    assert ret_val == (0, 0, 1)
    assert len(JobPriority.objects.filter(
        buildsystem='*',
        buildtype=data['platform_option'],
        platform=data['platform'],
        testtype=data['testtype'],
    )) == 1
    assert (
        len(
            JobPriority.objects.filter(
                buildsystem='*',
                buildtype=data['platform_option'],
                platform=data['platform'],
                testtype=data['testtype'],
            )
        )
        == 1
    )

@ -22,9 +22,7 @@ BUGFILER_API_URL = "https://thisisnotbugzilla.org"
# access. But if we use the defaults in config.settings, we also get the
# ``ModelBackend``, which will try to access the DB. This ensures we don't
# do that, since we don't have any tests that use the ``ModelBackend``.
AUTHENTICATION_BACKENDS = (
    'treeherder.auth.backends.AuthBackend',
)
AUTHENTICATION_BACKENDS = ('treeherder.auth.backends.AuthBackend',)

# For Push Health Usage dashboard
NEW_RELIC_INSIGHTS_API_KEY = "123"

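In the collapsed `AUTHENTICATION_BACKENDS` the trailing comma is what keeps it a one-element tuple; without it the parentheses would only group a string. A quick illustration, not part of this commit:

    backends = ('treeherder.auth.backends.AuthBackend',)
    grouped = ('treeherder.auth.backends.AuthBackend')
    assert isinstance(backends, tuple) and len(backends) == 1
    assert isinstance(grouped, str)
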
@ -40,7 +40,9 @@ def test_get_tls_redis_url():
    https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel
    """
    REDIS_URL = 'redis://h:abc8069@ec2-12-34-56-78.compute-1.amazonaws.com:8069'
    TLS_REDIS_URL = 'rediss://h:abc8069@ec2-12-34-56-78.compute-1.amazonaws.com:8070?ssl_cert_reqs=none'
    TLS_REDIS_URL = (
        'rediss://h:abc8069@ec2-12-34-56-78.compute-1.amazonaws.com:8070?ssl_cert_reqs=none'
    )
    assert get_tls_redis_url(REDIS_URL) == TLS_REDIS_URL

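The test above pins down the expected transformation: switch the scheme to rediss://, bump the stunnel port by one, and disable certificate verification. A compatible sketch assuming urllib.parse semantics (the real helper in treeherder may be implemented differently):

    from urllib.parse import urlparse

    def get_tls_redis_url(redis_url):
        # redis://user:pass@host:8069 -> rediss://user:pass@host:8070?ssl_cert_reqs=none
        parsed = urlparse(redis_url)
        host_part = parsed.netloc.rsplit(':', 1)[0]
        return 'rediss://{}:{}?ssl_cert_reqs=none'.format(host_part, parsed.port + 1)
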
@ -9,8 +9,7 @@ from treeherder.etl.push import store_push_data
from treeherder.model import models


def do_job_ingestion(test_repository, job_data, sample_push,
                     verify_data=True):
def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
    """
    Ingest ``job_data`` which will be JSON job blobs.

@ -57,18 +56,24 @@ def do_job_ingestion(test_repository, job_data, sample_push,
        job = blob['job']

        build_platforms_ref.add(
            "-".join([
                job.get('build_platform', {}).get('os_name', 'unknown'),
                job.get('build_platform', {}).get('platform', 'unknown'),
                job.get('build_platform', {}).get('architecture', 'unknown')
            ]))
            "-".join(
                [
                    job.get('build_platform', {}).get('os_name', 'unknown'),
                    job.get('build_platform', {}).get('platform', 'unknown'),
                    job.get('build_platform', {}).get('architecture', 'unknown'),
                ]
            )
        )

        machine_platforms_ref.add(
            "-".join([
                job.get('machine_platform', {}).get('os_name', 'unknown'),
                job.get('machine_platform', {}).get('platform', 'unknown'),
                job.get('machine_platform', {}).get('architecture', 'unknown')
            ]))
            "-".join(
                [
                    job.get('machine_platform', {}).get('os_name', 'unknown'),
                    job.get('machine_platform', {}).get('platform', 'unknown'),
                    job.get('machine_platform', {}).get('architecture', 'unknown'),
                ]
            )
        )

        machines_ref.add(job.get('machine', 'unknown'))

@ -110,11 +115,8 @@ def verify_build_platforms(build_platforms_ref):
    build_platforms_set = set()
    for build_platform in models.BuildPlatform.objects.all():
        build_platforms_set.add(
            "-".join([
                build_platform.os_name,
                build_platform.platform,
                build_platform.architecture
            ]))
            "-".join([build_platform.os_name, build_platform.platform, build_platform.architecture])
        )

    assert build_platforms_ref.issubset(build_platforms_set)

@ -124,11 +126,10 @@ def verify_machine_platforms(machine_platforms_ref):
    machine_platforms_set = set()
    for machine_platform in models.MachinePlatform.objects.all():
        machine_platforms_set.add(
            "-".join([
                machine_platform.os_name,
                machine_platform.platform,
                machine_platform.architecture
            ]))
            "-".join(
                [machine_platform.os_name, machine_platform.platform, machine_platform.architecture]
            )
        )

    assert machine_platforms_ref.issubset(machine_platforms_set)

@ -161,8 +162,7 @@ def verify_products(products_ref):

def verify_pushes(test_repository, pushes_ref):
    return pushes_ref.issubset(models.Push.objects.values_list(
        'revision', flat=True))
    return pushes_ref.issubset(models.Push.objects.values_list('revision', flat=True))


def verify_log_urls(test_repository, log_urls_ref):

@ -173,8 +173,9 @@ def verify_log_urls(test_repository, log_urls_ref):


def verify_superseded(expected_superseded_job_guids):
    superseeded_guids = models.Job.objects.filter(
        result='superseded').values_list('guid', flat=True)
    superseeded_guids = models.Job.objects.filter(result='superseded').values_list(
        'guid', flat=True
    )
    assert set(superseeded_guids) == expected_superseded_job_guids

@ -209,7 +210,8 @@ def create_generic_job(guid, repository, push_id, generic_reference_data):
        submit_time=job_time,
        start_time=job_time,
        end_time=job_time,
        tier=1)
        tier=1,
    )


def add_log_response(filename):

@ -225,9 +227,6 @@ def add_log_response(filename):
        responses.GET,
        log_url,
        body=content,
        adding_headers={
            'Content-Encoding': 'gzip',
            'Content-Length': str(len(content)),
        }
        adding_headers={'Content-Encoding': 'gzip', 'Content-Length': str(len(content)),},
    )
    return log_url

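The odd-looking `adding_headers={...},},` line above appears to be an artifact of the black release used for this commit, which predates the "magic trailing comma" rule introduced in black 20.8b0: an existing trailing comma survives when the literal is joined onto one line. The result is still valid Python:

    headers = {'Content-Encoding': 'gzip', 'Content-Length': '42',}
    assert headers == {'Content-Encoding': 'gzip', 'Content-Length': '42'}
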
@ -9,9 +9,9 @@ from treeherder.model.models import Job


@pytest.mark.skip("Test needs fixing in bug: 1307289 (plus upgrade from jobs to tasks)")
def test_retry_missing_revision_succeeds(sample_data, sample_push,
                                         test_repository, mock_log_parser,
                                         monkeypatch):
def test_retry_missing_revision_succeeds(
    sample_data, sample_push, test_repository, mock_log_parser, monkeypatch
):
    """
    Ensure that when the missing push exists after a retry, that the job
    is then ingested.

@ -1,8 +1,6 @@
from tests.autoclassify.utils import (create_failure_lines,
                                      test_line)
from tests.autoclassify.utils import create_failure_lines, test_line
from treeherder.model.models import FailureLine
from treeherder.utils.queryset import (chunked_qs,
                                       chunked_qs_reverse)
from treeherder.utils.queryset import chunked_qs, chunked_qs_reverse


def test_chunked_qs(test_job):

@ -1,65 +1,69 @@
import pytest

from treeherder.utils.taskcluster_lib_scopes import (patternMatch,
                                                     satisfiesExpression)
from treeherder.utils.taskcluster_lib_scopes import patternMatch, satisfiesExpression


# satisfiesExpression()
@pytest.mark.parametrize('scopeset, expression', [
    [[], {'AllOf': []}],
    [['A'], {'AllOf': ['A']}],
    [['A', 'B'], 'A'],
    [['a*', 'b*', 'c*'], 'abc'],
    [['abc'], {'AnyOf': ['abc', 'def']}],
    [['def'], {'AnyOf': ['abc', 'def']}],
    [['abc', 'def'], {'AnyOf': ['abc', 'def']}],
    [['abc*'], {'AnyOf': ['abc', 'def']}],
    [['abc*'], {'AnyOf': ['abc']}],
    [['abc*', 'def*'], {'AnyOf': ['abc', 'def']}],
    [['foo'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
    [['a*', 'b*', 'c*'], {'AnyOf': ['cfoo', 'dfoo']}],
    [['a*', 'b*', 'c*'], {'AnyOf': ['bx', 'by']}],
    [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx']}],
    # complex expression with only
    # some AnyOf branches matching
    [
        ['a*', 'b*', 'c*'],
        {'AnyOf': [
            {'AllOf': ['ax', 'jx']},  # doesn't match
            {'AllOf': ['bx', 'cx']},  # does match
            'bbb',
        ]},
    ],
])
@pytest.mark.parametrize(
    'scopeset, expression',
    [
        [[], {'AllOf': []}],
        [['A'], {'AllOf': ['A']}],
        [['A', 'B'], 'A'],
        [['a*', 'b*', 'c*'], 'abc'],
        [['abc'], {'AnyOf': ['abc', 'def']}],
        [['def'], {'AnyOf': ['abc', 'def']}],
        [['abc', 'def'], {'AnyOf': ['abc', 'def']}],
        [['abc*'], {'AnyOf': ['abc', 'def']}],
        [['abc*'], {'AnyOf': ['abc']}],
        [['abc*', 'def*'], {'AnyOf': ['abc', 'def']}],
        [['foo'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
        [['a*', 'b*', 'c*'], {'AnyOf': ['cfoo', 'dfoo']}],
        [['a*', 'b*', 'c*'], {'AnyOf': ['bx', 'by']}],
        [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx']}],
        # complex expression with only
        # some AnyOf branches matching
        [
            ['a*', 'b*', 'c*'],
            {
                'AnyOf': [
                    {'AllOf': ['ax', 'jx']},  # doesn't match
                    {'AllOf': ['bx', 'cx']},  # does match
                    'bbb',
                ]
            },
        ],
    ],
)
def test_expression_is_satisfied(scopeset, expression):
    assert satisfiesExpression(scopeset, expression) is True


@pytest.mark.parametrize('scopeset, expression', [
    [[], {'AnyOf': []}],
    [[], 'missing-scope'],
    [['wrong-scope'], 'missing-scope'],
    [['ghi'], {'AnyOf': ['abc', 'def']}],
    [['ghi*'], {'AnyOf': ['abc', 'def']}],
    [['ghi', 'fff'], {'AnyOf': ['abc', 'def']}],
    [['ghi*', 'fff*'], {'AnyOf': ['abc', 'def']}],
    [['abc'], {'AnyOf': ['ghi']}],
    [['abc*'], {'AllOf': ['abc', 'ghi']}],
    [[''], {'AnyOf': ['abc', 'def']}],
    [['abc:def'], {'AnyOf': ['abc', 'def']}],
    [['xyz', 'abc'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
    [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx', {'AnyOf': ['xxx', 'yyyy']}]}],
])
@pytest.mark.parametrize(
    'scopeset, expression',
    [
        [[], {'AnyOf': []}],
        [[], 'missing-scope'],
        [['wrong-scope'], 'missing-scope'],
        [['ghi'], {'AnyOf': ['abc', 'def']}],
        [['ghi*'], {'AnyOf': ['abc', 'def']}],
        [['ghi', 'fff'], {'AnyOf': ['abc', 'def']}],
        [['ghi*', 'fff*'], {'AnyOf': ['abc', 'def']}],
        [['abc'], {'AnyOf': ['ghi']}],
        [['abc*'], {'AllOf': ['abc', 'ghi']}],
        [[''], {'AnyOf': ['abc', 'def']}],
        [['abc:def'], {'AnyOf': ['abc', 'def']}],
        [['xyz', 'abc'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
        [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx', {'AnyOf': ['xxx', 'yyyy']}]}],
    ],
)
def test_expression_is_not_satisfied(scopeset, expression):
    assert not satisfiesExpression(scopeset, expression)


@pytest.mark.parametrize('scopeset', [
    None,
    'scopeset_argument',
    ('scopeset', 'argument'),
    {'scopeset', 'argument'},
])
@pytest.mark.parametrize(
    'scopeset', [None, 'scopeset_argument', ('scopeset', 'argument'), {'scopeset', 'argument'},]
)
def test_wrong_scopeset_type_raises_exception(scopeset):
    with pytest.raises(TypeError):
        satisfiesExpression(scopeset, 'in-tree:hook-action:{hook_group_id}/{hook_id}')

@ -70,18 +74,16 @@ def test_identical_scope_and_pattern_are_matching():
    assert patternMatch('mock:scope', 'mock:scope') is True


@pytest.mark.parametrize('pattern, scope', [
    ('matching*', 'matching'),
    ('matching*', 'matching/scope')
])
@pytest.mark.parametrize(
    'pattern, scope', [('matching*', 'matching'), ('matching*', 'matching/scope')]
)
def test_starred_patterns_are_matching(pattern, scope):
    assert patternMatch(pattern, scope) is True


@pytest.mark.parametrize('pattern, scope', [
    ('matching*', 'mismatching'),
    ('match*ing', 'matching'),
    ('*matching', 'matching')
])
@pytest.mark.parametrize(
    'pattern, scope',
    [('matching*', 'mismatching'), ('match*ing', 'matching'), ('*matching', 'matching')],
)
def test_starred_patterns_dont_matching(pattern, scope):
    assert not patternMatch(pattern, scope)

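Taken together, the cases above show the contract of satisfiesExpression(): a bare string is a single required scope, AllOf/AnyOf dicts combine sub-expressions, and a trailing '*' in a granted scope acts as a prefix wildcard. A usage sketch derived only from these tests:

    from treeherder.utils.taskcluster_lib_scopes import satisfiesExpression

    granted = ['a*', 'b*', 'c*']
    assert satisfiesExpression(granted, 'abc')  # 'a*' covers 'abc'
    assert satisfiesExpression(granted, {'AllOf': ['bx', 'cx']})  # both prefixes granted
    assert not satisfiesExpression(granted, {'AnyOf': ['xxx', 'yyyy']})  # nothing matches
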
@ -47,14 +47,18 @@ def test_post_no_auth():

# Auth Login and Logout Tests


@pytest.mark.django_db
@pytest.mark.parametrize(('id_token_sub', 'id_token_email', 'expected_username'), [
    ('ad|Mozilla-LDAP|biped', 'biped@mozilla.com', 'mozilla-ldap/biped@mozilla.com'),
    ('email', 'biped@mozilla.com', 'email/biped@mozilla.com'),
    ('oauth2|biped', 'biped@mozilla.com', 'oauth2/biped@mozilla.com'),
    ('github|0000', 'biped@gmail.com', 'github/biped@gmail.com'),
    ('google-oauth2|0000', 'biped@mozilla.com', 'google/biped@mozilla.com'),
])
@pytest.mark.parametrize(
    ('id_token_sub', 'id_token_email', 'expected_username'),
    [
        ('ad|Mozilla-LDAP|biped', 'biped@mozilla.com', 'mozilla-ldap/biped@mozilla.com'),
        ('email', 'biped@mozilla.com', 'email/biped@mozilla.com'),
        ('oauth2|biped', 'biped@mozilla.com', 'oauth2/biped@mozilla.com'),
        ('github|0000', 'biped@gmail.com', 'github/biped@gmail.com'),
        ('google-oauth2|0000', 'biped@mozilla.com', 'google/biped@mozilla.com'),
    ],
)
def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email, expected_username):
    """
    Test that a new user is able to log in via a variety of identity providers,

@ -79,7 +83,7 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer meh',
        HTTP_ID_TOKEN='meh',
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 200
    assert resp.json() == {

@ -110,7 +114,7 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer meh',
        HTTP_ID_TOKEN='meh',
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 200
    assert resp.json()['username'] == expected_username

@ -138,7 +142,7 @@ def test_login_same_email_different_provider(test_ldap_user, client, monkeypatch
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer meh',
        HTTP_ID_TOKEN='meh',
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 200
    assert resp.json()['username'] == 'email/user@foo.com'

@ -160,7 +164,7 @@ def test_login_unknown_identity_provider(client, monkeypatch):
        reverse("auth-login"),
        HTTP_AUTHORIZATION="Bearer meh",
        HTTP_ID_TOKEN="meh",
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 403
    assert resp.json()["detail"] == "Unrecognized identity"

@ -174,7 +178,11 @@ def test_login_not_active(test_ldap_user, client, monkeypatch):
    access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds

    def userinfo_mock(*args, **kwargs):
        return {'sub': 'Mozilla-LDAP', 'email': test_ldap_user.email, 'exp': id_token_expiration_timestamp}
        return {
            'sub': 'Mozilla-LDAP',
            'email': test_ldap_user.email,
            'exp': id_token_expiration_timestamp,
        }

    monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)

@ -185,7 +193,7 @@ def test_login_not_active(test_ldap_user, client, monkeypatch):
        reverse("auth-login"),
        HTTP_AUTHORIZATION="Bearer meh",
        HTTP_ID_TOKEN="meh",
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 403
    assert resp.json()["detail"] == "This user has been disabled."

@ -197,35 +205,21 @@ def test_login_authorization_header_missing(client):
    assert resp.json()["detail"] == "Authorization header is expected"


@pytest.mark.parametrize('auth_header_value', [
    'foo',
    'Bearer ',
    'Bearer foo bar',
])
@pytest.mark.parametrize('auth_header_value', ['foo', 'Bearer ', 'Bearer foo bar',])
def test_login_authorization_header_malformed(client, auth_header_value):
    resp = client.get(
        reverse('auth-login'),
        HTTP_AUTHORIZATION=auth_header_value,
    )
    resp = client.get(reverse('auth-login'), HTTP_AUTHORIZATION=auth_header_value,)
    assert resp.status_code == 403
    assert resp.json()['detail'] == "Authorization header must be of form 'Bearer {token}'"


def test_login_id_token_header_missing(client):
    resp = client.get(
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer abc',
    )
    resp = client.get(reverse('auth-login'), HTTP_AUTHORIZATION='Bearer abc',)
    assert resp.status_code == 403
    assert resp.json()['detail'] == 'Id-Token header is expected'


def test_login_id_token_malformed(client):
    resp = client.get(
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer abc',
        HTTP_ID_TOKEN='aaa',
    )
    resp = client.get(reverse('auth-login'), HTTP_AUTHORIZATION='Bearer abc', HTTP_ID_TOKEN='aaa',)
    assert resp.status_code == 403
    assert resp.json()['detail'] == 'Unable to decode the Id token header'

@ -243,9 +237,9 @@ def test_login_id_token_missing_rsa_key_id(client):
            # "typ": "JWT"
            # }
            # (and default payload)
            'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.' +
            'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.' +
            'SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
            'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.'
            + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
            + 'SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
        ),
    )
    assert resp.status_code == 403

@ -266,9 +260,9 @@ def test_login_id_token_unknown_rsa_key_id(client):
            # "kid": "1234"
            # }
            # (and default payload)
            'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ.' +
            'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.' +
            'Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s'
            'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ.'
            + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
            + 'Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s'
        ),
    )
    assert resp.status_code == 403

@ -289,10 +283,10 @@ def test_login_id_token_invalid_signature(client):
            # "kid": "MkZDNDcyRkNGRTFDNjlBNjZFOEJBN0ZBNzJBQTNEMDhCMEEwNkFGOA"
            # }
            # (and default payload)
            'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp' +
            'aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9.' +
            'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.' +
            'this_signature_is_not_valid'
            'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp'
            + 'aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9.'
            + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
            + 'this_signature_is_not_valid'
        ),
    )
    assert resp.status_code == 403

@ -308,11 +302,7 @@ def test_login_access_token_expiry_header_missing(client, monkeypatch):

    monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)

    resp = client.get(
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer foo',
        HTTP_ID_TOKEN='bar',
    )
    resp = client.get(reverse('auth-login'), HTTP_AUTHORIZATION='Bearer foo', HTTP_ID_TOKEN='bar',)
    assert resp.status_code == 403
    assert resp.json()['detail'] == 'Access-Token-Expires-At header is expected'

@ -374,7 +364,7 @@ def test_login_id_token_expires_before_access_token(test_ldap_user, client, monk
        reverse('auth-login'),
        HTTP_AUTHORIZATION='Bearer meh',
        HTTP_ID_TOKEN='meh',
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp)
        HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
    )
    assert resp.status_code == 200
    assert client.session.get_expiry_age() == pytest.approx(one_hour_in_seconds, abs=5)

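In the JWT hunks above, black rewrites the long string concatenations to break before the '+' rather than after it, matching PEP 8's preference for breaking before binary operators. The concatenated value is unchanged, as a quick illustration (not from this commit) shows:

    token = (
        'header.'
        + 'payload.'
        + 'signature'
    )
    assert token == 'header.payload.signature'
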
@ -3,16 +3,15 @@ import json
import pytest
from django.urls import reverse

from treeherder.model.models import (BugJobMap,
                                     Job)
from treeherder.model.models import BugJobMap, Job


@pytest.mark.parametrize('test_no_auth,test_duplicate_handling', [
    (True, False),
    (False, False),
    (False, True)])
def test_create_bug_job_map(client, test_job, test_user, bugs,
                            test_no_auth, test_duplicate_handling):
@pytest.mark.parametrize(
    'test_no_auth,test_duplicate_handling', [(True, False), (False, False), (False, True)]
)
def test_create_bug_job_map(
    client, test_job, test_user, bugs, test_no_auth, test_duplicate_handling
):
    """
    test creating a single note via endpoint
    """

@ -20,11 +19,7 @@ def test_create_bug_job_map(client, test_job, test_user, bugs,
    if not test_no_auth:
        client.force_authenticate(user=test_user)

    submit_obj = {
        u"job_id": test_job.id,
        u"bug_id": bug.id,
        u"type": u"manual"
    }
    submit_obj = {u"job_id": test_job.id, u"bug_id": bug.id, u"type": u"manual"}

    # if testing duplicate handling, submit twice
    if test_duplicate_handling:

@ -35,7 +30,7 @@ def test_create_bug_job_map(client, test_job, test_user, bugs,
    for _ in range(num_times):
        resp = client.post(
            reverse("bug-job-map-list", kwargs={"project": test_job.repository.name}),
            data=submit_obj
            data=submit_obj,
        )

    if test_no_auth:

@ -59,31 +54,28 @@ def test_bug_job_map_list(client, test_repository, eleven_jobs_stored, test_user
    expected = list()

    for (i, job) in enumerate(jobs):
        bjm = BugJobMap.create(
            job_id=job.id,
            bug_id=bugs[i].id,
            user=test_user,
        )
        bjm = BugJobMap.create(job_id=job.id, bug_id=bugs[i].id, user=test_user,)

        expected.append({
            "job_id": job.id,
            "bug_id": bugs[i].id,
            "created": bjm.created.isoformat(),
            "who": test_user.email
        })
        expected.append(
            {
                "job_id": job.id,
                "bug_id": bugs[i].id,
                "created": bjm.created.isoformat(),
                "who": test_user.email,
            }
        )

    # verify that API works with different combinations of job_id= parameters
    for job_range in [(0, 1), (0, 2), (0, 9)]:
        resp = client.get(
            reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
            data={'job_id': [job.id for job in
                             jobs[job_range[0]:job_range[1]]]})
            data={'job_id': [job.id for job in jobs[job_range[0] : job_range[1]]]},
        )
        assert resp.status_code == 200
        assert resp.json() == expected[job_range[0]:job_range[1]]
        assert resp.json() == expected[job_range[0] : job_range[1]]


def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository,
                            test_user, bugs):
def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_user, bugs):
    """
    test retrieving a list of bug_job_map
    """

@ -91,19 +83,12 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository,
    bug = bugs[0]
    expected = list()

    bjm = BugJobMap.create(
        job_id=job.id,
        bug_id=bug.id,
        user=test_user,
    )
    bjm = BugJobMap.create(job_id=job.id, bug_id=bug.id, user=test_user,)

    pk = "{0}-{1}".format(job.id, bug.id)

    resp = client.get(
        reverse("bug-job-map-detail", kwargs={
            "project": test_repository.name,
            "pk": pk
        })
        reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
    )
    assert resp.status_code == 200

@ -111,14 +96,15 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository,
        "job_id": job.id,
        "bug_id": bug.id,
        "created": bjm.created.isoformat(),
        "who": test_user.email
        "who": test_user.email,
    }
    assert resp.json() == expected


@pytest.mark.parametrize('test_no_auth', [True, False])
def test_bug_job_map_delete(client, eleven_jobs_stored, test_repository,
                            test_user, test_no_auth, bugs):
def test_bug_job_map_delete(
    client, eleven_jobs_stored, test_repository, test_user, test_no_auth, bugs
):
    """
    test deleting a bug_job_map object
    """

@ -126,9 +112,7 @@ def test_bug_job_map_delete(client, eleven_jobs_stored, test_repository,
    bug = bugs[0]

    BugJobMap.create(
        job_id=job.id,
        bug_id=bug.id,
        user=test_user,
        job_id=job.id, bug_id=bug.id, user=test_user,
    )

    if not test_no_auth:

@ -137,10 +121,7 @@ def test_bug_job_map_delete(client, eleven_jobs_stored, test_repository,
    pk = "{0}-{1}".format(job.id, bug.id)

    resp = client.delete(
        reverse("bug-job-map-detail", kwargs={
            "project": test_repository.name,
            "pk": pk
        })
        reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
    )

    if test_no_auth:

@ -160,7 +141,7 @@ def test_bug_job_map_bad_job_id(client, test_repository):

    resp = client.get(
        reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
        data={'job_id': bad_job_id}
        data={'job_id': bad_job_id},
    )

    assert resp.status_code == 400

@ -18,18 +18,22 @@ def test_create_bug(client, eleven_jobs_stored, activate_responses, test_user):
        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
        assert requestdata['type'] == "defect"
        assert requestdata['product'] == "Bugzilla"
        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(test_user.email.replace('@', " [at] "))
        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
            test_user.email.replace('@', " [at] ")
        )
        assert requestdata['component'] == "Administration"
        assert requestdata['summary'] == u"Intermittent summary"
        assert requestdata['comment_tags'] == "treeherder"
        assert requestdata['version'] == "4.0.17"
        assert requestdata['keywords'] == ["intermittent-failure"]
        resp_body = {"id": 323}
        return(200, headers, json.dumps(resp_body))
        return (200, headers, json.dumps(resp_body))

    responses.add_callback(
        responses.POST, "https://thisisnotbugzilla.org/rest/bug",
        callback=request_callback, match_querystring=False,
        responses.POST,
        "https://thisisnotbugzilla.org/rest/bug",
        callback=request_callback,
        match_querystring=False,
        content_type="application/json",
    )

@ -46,7 +50,7 @@ def test_create_bug(client, eleven_jobs_stored, activate_responses, test_user):
            "comment": u"Intermittent Description",
            "comment_tags": "treeherder",
            "keywords": ["intermittent-failure"],
        }
        },
    )
    assert resp.status_code == 200
    assert resp.json()['success'] == 323

@ -64,18 +68,24 @@ def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses,
        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
        assert requestdata['type'] == "defect"
        assert requestdata['product'] == "Bugzilla"
        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent “description” string".format(test_user.email.replace('@', " [at] "))
        assert requestdata[
            'description'
        ] == u"**Filed by:** {}\nIntermittent “description” string".format(
            test_user.email.replace('@', " [at] ")
        )
        assert requestdata['component'] == "Administration"
        assert requestdata['summary'] == u"Intermittent “summary”"
        assert requestdata['comment_tags'] == "treeherder"
        assert requestdata['version'] == "4.0.17"
        assert requestdata['keywords'] == ["intermittent-failure"]
        resp_body = {"id": 323}
        return(200, headers, json.dumps(resp_body))
        return (200, headers, json.dumps(resp_body))

    responses.add_callback(
        responses.POST, "https://thisisnotbugzilla.org/rest/bug",
        callback=request_callback, match_querystring=False,
        responses.POST,
        "https://thisisnotbugzilla.org/rest/bug",
        callback=request_callback,
        match_querystring=False,
        content_type="application/json",
    )

@ -92,7 +102,7 @@ def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses,
            "comment": u"Intermittent “description” string",
            "comment_tags": "treeherder",
            "keywords": ["intermittent-failure"],
        }
        },
    )
    assert resp.status_code == 200
    assert resp.json()['success'] == 323

@ -110,7 +120,9 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
        assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
        assert requestdata['type'] == "defect"
        assert requestdata['product'] == "Bugzilla"
        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(test_user.email.replace('@', " [at] "))
        assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
            test_user.email.replace('@', " [at] ")
        )
        assert requestdata['component'] == "Administration"
|
||||
assert requestdata['summary'] == u"Intermittent summary"
|
||||
assert requestdata['comment_tags'] == "treeherder"
|
||||
|
@ -119,11 +131,13 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
|
|||
assert requestdata['cf_crash_signature'] == "[@crashsig]"
|
||||
assert requestdata['priority'] == '--'
|
||||
resp_body = {"id": 323}
|
||||
return(200, headers, json.dumps(resp_body))
|
||||
return (200, headers, json.dumps(resp_body))
|
||||
|
||||
responses.add_callback(
|
||||
responses.POST, "https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback, match_querystring=False,
|
||||
responses.POST,
|
||||
"https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback,
|
||||
match_querystring=False,
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
|
@ -142,7 +156,7 @@ def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_u
|
|||
"crash_signature": "[@crashsig]",
|
||||
"priority": "--",
|
||||
"keywords": ["intermittent-failure", "crash"],
|
||||
}
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
assert resp.json()['success'] == 323
|
||||
|
@ -170,11 +184,13 @@ def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_respons
|
|||
assert requestdata['blocks'] == "1234"
|
||||
assert requestdata['see_also'] == "12345"
|
||||
resp_body = {"id": 323}
|
||||
return(200, headers, json.dumps(resp_body))
|
||||
return (200, headers, json.dumps(resp_body))
|
||||
|
||||
responses.add_callback(
|
||||
responses.POST, "https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback, match_querystring=False,
|
||||
responses.POST,
|
||||
"https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback,
|
||||
match_querystring=False,
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
|
@ -192,13 +208,15 @@ def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_respons
|
|||
"depends_on": "123",
|
||||
"blocks": "1234",
|
||||
"see_also": "12345",
|
||||
}
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 403
|
||||
assert resp.json()['detail'] == "Authentication credentials were not provided."
|
||||
|
||||
|
||||
def test_create_bug_with_long_crash_signature(client, eleven_jobs_stored, activate_responses, test_user):
|
||||
def test_create_bug_with_long_crash_signature(
|
||||
client, eleven_jobs_stored, activate_responses, test_user
|
||||
):
|
||||
"""
|
||||
test successfully creating a bug in bugzilla
|
||||
"""
|
||||
|
@ -221,11 +239,13 @@ def test_create_bug_with_long_crash_signature(client, eleven_jobs_stored, activa
|
|||
assert requestdata['blocks'] == "1234"
|
||||
assert requestdata['see_also'] == "12345"
|
||||
resp_body = {"id": 323}
|
||||
return(200, headers, json.dumps(resp_body))
|
||||
return (200, headers, json.dumps(resp_body))
|
||||
|
||||
responses.add_callback(
|
||||
responses.POST, "https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback, match_querystring=False,
|
||||
responses.POST,
|
||||
"https://thisisnotbugzilla.org/rest/bug",
|
||||
callback=request_callback,
|
||||
match_querystring=False,
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
|
@ -247,7 +267,7 @@ def test_create_bug_with_long_crash_signature(client, eleven_jobs_stored, activa
|
|||
"depends_on": "123",
|
||||
"blocks": "1234",
|
||||
"see_also": "12345",
|
||||
}
|
||||
},
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
assert resp.json()['failure'] == "Crash signature can't be more than 2048 characters."
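
The recurring "return(200, ...)" to "return (200, ...)" fixes above can be
reproduced with black's own Python API; format_str() and FileMode() are black's
documented entry points. A minimal sketch, assuming the 100-character line
length and disabled string normalization that this diff's output suggests:

import black

# Hypothetical input mirroring the request_callback helpers above.
src = (
    "def request_callback(request):\n"
    "    return(200, headers, json.dumps(resp_body))\n"
)
mode = black.FileMode(line_length=100, string_normalization=False)
# format_str() returns the reformatted module source as a string.
print(black.format_str(src, mode=mode), end="")
# The body comes back as: return (200, headers, json.dumps(resp_body))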

@@ -3,8 +3,7 @@ from datetime import datetime
from django.db import transaction
from django.urls import reverse

from treeherder.changelog.models import (Changelog,
                                         ChangelogFile)
from treeherder.changelog.models import Changelog, ChangelogFile


def test_changelog_list(client, test_job_with_notes):

@@ -11,13 +11,11 @@ def test_valid_report(client):
            'document-uri': 'http://localhost:8000/',
            'original-policy': '...',
            'referrer': '',
            'violated-directive': 'connect-src'
            'violated-directive': 'connect-src',
        }
    }
    response = client.post(
        reverse('csp-report'),
        data=json.dumps(valid_report),
        content_type='application/csp-report',
        reverse('csp-report'), data=json.dumps(valid_report), content_type='application/csp-report',
    )
    assert response.status_code == 200
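
Another inference from the hunks above: single-quoted strings such as
'application/csp-report' survive untouched, while black by default rewrites
single quotes to double quotes. That points to string normalization being
switched off for this run. A small sketch of the difference (assumed settings,
not recorded in this diff):

import black

src = "content_type = 'application/csp-report'\n"
# Default mode normalizes quotes to double quotes.
print(black.format_str(src, mode=black.FileMode()), end="")
# -> content_type = "application/csp-report"
# With string normalization skipped (black's -S flag), quoting is preserved.
print(black.format_str(src, mode=black.FileMode(string_normalization=False)), end="")
# -> content_type = 'application/csp-report'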

@@ -4,10 +4,7 @@ from treeherder.model.models import BugJobMap


def test_failures(bug_data, client):
    expected = [{
        'bug_count': 1,
        'bug_id': bug_data['bug_id']
    }]
    expected = [{'bug_count': 1, 'bug_id': bug_data['bug_id']}]

    resp = client.get(reverse('failures') + bug_data['query_string'])
    assert resp.status_code == 200

@@ -15,21 +12,24 @@ def test_failures(bug_data, client):


def test_failures_by_bug(bug_data, client):
    expected = [{
        'bug_id': bug_data['bug_id'],
        'build_type': bug_data['option'].name,
        'job_id': bug_data['job'].id,
        'push_time': bug_data['job'].push.time.strftime('%Y-%m-%d %H:%M:%S'),
        'platform': bug_data['job'].machine_platform.platform,
        'revision': bug_data['job'].push.revision,
        'test_suite': bug_data['job'].signature.job_type_name,
        'tree': bug_data['job'].repository.name,
        'machine_name': bug_data['job'].machine.name,
        'lines': []
    }]
    expected = [
        {
            'bug_id': bug_data['bug_id'],
            'build_type': bug_data['option'].name,
            'job_id': bug_data['job'].id,
            'push_time': bug_data['job'].push.time.strftime('%Y-%m-%d %H:%M:%S'),
            'platform': bug_data['job'].machine_platform.platform,
            'revision': bug_data['job'].push.revision,
            'test_suite': bug_data['job'].signature.job_type_name,
            'tree': bug_data['job'].repository.name,
            'machine_name': bug_data['job'].machine.name,
            'lines': [],
        }
    ]

    resp = client.get(reverse('failures-by-bug') + bug_data['query_string'] + '&bug={}'.format(
        bug_data['bug_id']))
    resp = client.get(
        reverse('failures-by-bug') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
    )
    assert resp.status_code == 200
    assert resp.json() == expected

@@ -39,19 +39,22 @@ def test_failure_count_by_bug(bug_data, client, test_run_data):
    bugs = list(BugJobMap.objects.all())

    for bug in bugs:
        if (bug.job.repository.name == bug_data['tree'] and
                bug.bug_id == bug_data['bug_id'] and
                bug.job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']):
        if (
            bug.job.repository.name == bug_data['tree']
            and bug.bug_id == bug_data['bug_id']
            and bug.job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
        ):
            failure_count += 1

    expected = {
                'date': test_run_data['push_time'],
                'test_runs': test_run_data['test_runs'],
                'failure_count': failure_count,
        'date': test_run_data['push_time'],
        'test_runs': test_run_data['test_runs'],
        'failure_count': failure_count,
    }

    resp = client.get(reverse('failure-count') + bug_data['query_string'] + '&bug={}'.format(
        bug_data['bug_id']))
    resp = client.get(
        reverse('failure-count') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
    )
    assert resp.status_code == 200
    assert resp.json()[0] == expected

@@ -60,9 +63,11 @@ def test_failure_count(bug_data, client, test_run_data):
    failure_count = 0

    for job in list(bug_data['jobs']):
        if (job.repository.name == bug_data['tree'] and
                job.failure_classification_id == 4 and
                job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']):
        if (
            job.repository.name == bug_data['tree']
            and job.failure_classification_id == 4
            and job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
        ):
            failure_count += 1

    expected = {
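
The multi-line conditions rewritten above show black's style for long boolean
expressions: the line break lands before each binary operator. flake8's default
rule W503 ("line break before binary operator") flags exactly this layout,
which is why repositories adopting black typically add W503 to flake8's ignore
list. A self-contained sketch of the layout in question:

# black breaks a long condition *before* each operator, so the operator
# leads the continuation line -- precisely what flake8 W503 complains about.
tree, bug_id = 'autoland', 12345  # hypothetical values

if (
    tree == 'autoland'
    and bug_id == 12345
    and len(str(bug_id)) == 5
):
    print('operator-leading breaks are the W503 style')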

@@ -1,29 +1,16 @@
from django.urls import reverse

from tests.test_utils import create_generic_job
from treeherder.model.models import (Job,
                                     JobDetail,
                                     Repository)
from treeherder.model.models import Job, JobDetail, Repository


def test_job_details(test_repository, failure_classifications,
                     generic_reference_data, push_stored, client):
def test_job_details(
    test_repository, failure_classifications, generic_reference_data, push_stored, client
):
    details = {
        'abcd': {
            'title': 'title',
            'value': 'value1',
            'url': None
        },
        'efgh': {
            'title': None,
            'value': 'value2',
            'url': None
        },
        'ijkl': {
            'title': 'title3',
            'value': 'value3',
            'url': 'https://localhost/foo'
        }
        'abcd': {'title': 'title', 'value': 'value1', 'url': None},
        'efgh': {'title': None, 'value': 'value2', 'url': None},
        'ijkl': {'title': 'title3', 'value': 'value3', 'url': 'https://localhost/foo'},
    }

    # create some job details for some fake jobs

@@ -32,7 +19,8 @@ def test_job_details(test_repository, failure_classifications,
        name=test_repository.name + '_2',
        dvcs_type=test_repository.dvcs_type,
        url=test_repository.url + '_2',
        codebase=test_repository.codebase)
        codebase=test_repository.codebase,
    )

    i = 1
    for (job_guid, params) in details.items():

@@ -45,10 +33,8 @@ def test_job_details(test_repository, failure_classifications,
            push_id = 2
            i = 1
        print(i, repository)
        job = create_generic_job(job_guid, repository, push_id,
                                 generic_reference_data)
        JobDetail.objects.create(
            job=job, **params)
        job = create_generic_job(job_guid, repository, push_id, generic_reference_data)
        JobDetail.objects.create(job=job, **params)
        i += 1
    print(JobDetail.objects.filter(job__guid='abcd'))

@@ -59,8 +45,7 @@ def test_job_details(test_repository, failure_classifications,
    # filter to just get one guid at a time
    for guid_identifier in ['job_guid', 'job__guid']:
        for (guid, detail) in details.items():
            resp = client.get(reverse('jobdetail-list') + '?{}={}'.format(
                guid_identifier, guid))
            resp = client.get(reverse('jobdetail-list') + '?{}={}'.format(guid_identifier, guid))
            assert resp.status_code == 200
            assert len(resp.json()['results']) == 1
            result = resp.json()['results'][0]

@@ -69,68 +54,59 @@ def test_job_details(test_repository, failure_classifications,
            assert result == detail

    # filter to get first with (just) job_id
    resp = client.get(reverse('jobdetail-list') +
                      '?job_id=1')
    resp = client.get(reverse('jobdetail-list') + '?job_id=1')
    assert resp.status_code == 200
    assert len(resp.json()['results']) == 1
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(
        ['abcd'])
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(['abcd'])

    # filter to get the first and second with job_id__in and repository
    resp = client.get(reverse('jobdetail-list') +
                      '?repository={}&job_id__in=1,2'.format(
                          test_repository.name))
    resp = client.get(
        reverse('jobdetail-list') + '?repository={}&job_id__in=1,2'.format(test_repository.name)
    )
    assert resp.status_code == 200
    assert len(resp.json()['results']) == 2
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(
        ['abcd', 'efgh'])
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(['abcd', 'efgh'])

    # filter to get the last element with job_id__in and repository
    resp = client.get(reverse('jobdetail-list') +
                      '?repository={}&job_id__in=3'.format(
                          test_repository2.name))
    resp = client.get(
        reverse('jobdetail-list') + '?repository={}&job_id__in=3'.format(test_repository2.name)
    )
    assert resp.status_code == 200
    assert len(resp.json()['results']) == 1
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(
        ['ijkl'])
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(['ijkl'])

    # make sure that filtering by repository with a job id in
    # a different repository returns no results
    resp = client.get(reverse('jobdetail-list') +
                      '?repository={}&job_id__in=3'.format(
                          test_repository.name))
    resp = client.get(
        reverse('jobdetail-list') + '?repository={}&job_id__in=3'.format(test_repository.name)
    )
    assert resp.status_code == 200
    assert resp.json()['results'] == []

    # add an extra one, but filter to just get those with a specific title.
    # we should only have one
    JobDetail.objects.create(title='title2', job=Job.objects.get(guid='abcd'),
                             value='foo')
    resp = client.get(reverse('jobdetail-list') +
                      '?title=title&job_guid=abcd')
    JobDetail.objects.create(title='title2', job=Job.objects.get(guid='abcd'), value='foo')
    resp = client.get(reverse('jobdetail-list') + '?title=title&job_guid=abcd')
    assert resp.status_code == 200
    assert len(resp.json()['results']) == 1
    assert set([v['job_guid'] for v in resp.json()['results']]) == set(['abcd'])

    # should also be able to filter by value
    resp = client.get(reverse('jobdetail-list') +
                      '?value=value1&job_guid=abcd')
    resp = client.get(reverse('jobdetail-list') + '?value=value1&job_guid=abcd')
    assert resp.status_code == 200
    assert resp.json()['results'] == [{
        'job_guid': 'abcd',
        'job_id': 1,
        'title': 'title',
        'url': None,
        'value': 'value1'
    }]
    assert resp.json()['results'] == [
        {'job_guid': 'abcd', 'job_id': 1, 'title': 'title', 'url': None, 'value': 'value1'}
    ]

    # Should be able to filter by push_id
    resp = client.get(reverse('jobdetail-list') + '?push_id=2')
    assert resp.status_code == 200
    assert resp.json()['results'] == [{
        'job_guid': 'ijkl',
        'job_id': 3,
        'title': 'title3',
        'url': 'https://localhost/foo',
        'value': 'value3'
    }]
    assert resp.json()['results'] == [
        {
            'job_guid': 'ijkl',
            'job_id': 3,
            'title': 'title3',
            'url': 'https://localhost/foo',
            'value': 'value3',
        }
    ]
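
The details/expected rewrites above illustrate black's two-state rule for
literals: a collection that fits within the line limit is joined onto a single
line, and one that does not fit is fully exploded, one element per line, with a
trailing comma after the last element. A condensed sketch of both outcomes
(hypothetical values, assuming the 100-character limit in use here):

# Fits on one line: collapsed.
details = {'abcd': {'title': 'title', 'value': 'value1', 'url': None}}
# Too long for one line: exploded, with a trailing comma on the last item.
expected = [
    {
        'job_guid': 'ijkl',
        'job_id': 3,
        'title': 'title3',
        'url': 'https://localhost/foo',
        'value': 'value3',
    }
]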

@@ -4,35 +4,26 @@ from tests.test_utils import create_generic_job
from treeherder.model.models import JobLog


def test_get_job_log_urls(test_repository, push_stored,
                          failure_classifications,
                          generic_reference_data, client):
    job1 = create_generic_job('1234', test_repository, 1,
                              generic_reference_data)
    job2 = create_generic_job('5678', test_repository, 1,
                              generic_reference_data)
def test_get_job_log_urls(
    test_repository, push_stored, failure_classifications, generic_reference_data, client
):
    job1 = create_generic_job('1234', test_repository, 1, generic_reference_data)
    job2 = create_generic_job('5678', test_repository, 1, generic_reference_data)

    JobLog.objects.create(job=job1,
                          name='test_log_1',
                          url='http://google.com',
                          status=JobLog.PENDING)
    JobLog.objects.create(job=job1,
                          name='test_log_2',
                          url='http://yahoo.com',
                          status=JobLog.PARSED)
    JobLog.objects.create(job=job2,
                          name='test_log_3',
                          url='http://yahoo.com',
                          status=JobLog.PARSED)
    JobLog.objects.create(
        job=job1, name='test_log_1', url='http://google.com', status=JobLog.PENDING
    )
    JobLog.objects.create(job=job1, name='test_log_2', url='http://yahoo.com', status=JobLog.PARSED)
    JobLog.objects.create(job=job2, name='test_log_3', url='http://yahoo.com', status=JobLog.PARSED)

    resp = client.get(reverse('job-log-url-list',
                              kwargs={"project": test_repository.name}) +
                      '?job_id=1')
    resp = client.get(
        reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1'
    )
    assert resp.status_code == 200
    assert len(resp.json()) == 2

    resp = client.get(reverse('job-log-url-list',
                              kwargs={"project": test_repository.name}) +
                      '?job_id=1&job_id=2')
    resp = client.get(
        reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1&job_id=2'
    )
    assert resp.status_code == 200
    assert len(resp.json()) == 3

@@ -5,27 +5,22 @@ from dateutil import parser
from django.urls import reverse
from rest_framework.status import HTTP_400_BAD_REQUEST

from treeherder.model.models import (Job,
                                     TextLogError,
                                     TextLogStep)
from treeherder.model.models import Job, TextLogError, TextLogStep


@pytest.mark.parametrize(('offset', 'count', 'expected_num'),
                         [(None, None, 10),
                          (None, 5, 5),
                          (5, None, 6),
                          (0, 5, 5),
                          (10, 10, 1)])
def test_job_list(client, eleven_jobs_stored, test_repository,
                  offset, count, expected_num):
@pytest.mark.parametrize(
    ('offset', 'count', 'expected_num'),
    [(None, None, 10), (None, 5, 5), (5, None, 6), (0, 5, 5), (10, 10, 1)],
)
def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, expected_num):
    """
    test retrieving a list of json blobs from the jobs-list
    endpoint.
    """
    url = reverse("jobs-list",
                  kwargs={"project": test_repository.name})
    params = '&'.join(['{}={}'.format(k, v) for k, v in
                       [('offset', offset), ('count', count)] if v])
    url = reverse("jobs-list", kwargs={"project": test_repository.name})
    params = '&'.join(
        ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
    )
    if params:
        url += '?{}'.format(params)
    resp = client.get(url)

@@ -71,7 +66,7 @@ def test_job_list(client, eleven_jobs_stored, test_repository,
        "ref_data_name",
        "signature",
        "task_id",
        "retry_id"
        "retry_id",
    ]
    for job in jobs:
        assert set(job.keys()) == set(exp_keys)

@@ -81,8 +76,7 @@ def test_job_list_bad_project(client, transactional_db):
    """
    test retrieving a job list with a bad project throws 404.
    """
    badurl = reverse("jobs-list",
                     kwargs={"project": "badproject"})
    badurl = reverse("jobs-list", kwargs={"project": "badproject"})

    resp = client.get(badurl)
    assert resp.status_code == 404

@@ -92,8 +86,7 @@ def test_job_list_equals_filter(client, eleven_jobs_stored, test_repository):
    """
    test retrieving a job list with a querystring filter.
    """
    url = reverse("jobs-list",
                  kwargs={"project": test_repository.name})
    url = reverse("jobs-list", kwargs={"project": test_repository.name})
    final_url = url + "?job_guid=f1c75261017c7c5ce3000931dce4c442fe0a1297"

    resp = client.get(final_url)

@@ -123,7 +116,10 @@ job_filter_values = [
    (u'option_collection_hash', u'32faaecac742100f7753f0c1d0aa0add01b4046b'),
    (u'platform', u'osx-10-7'),
    (u'reason', u'scheduler'),
    (u'ref_data_name', u'Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome'),
    (
        u'ref_data_name',
        u'Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome',
    ),
    (u'result', u'success'),
    (u'result_set_id', 4),
    (u'signature', u'aebe9066ff1c765815ec0513a3389238c80ef166'),

@@ -131,7 +127,7 @@ job_filter_values = [
    (u'state', u'completed'),
    (u'submit_timestamp', 1384356854),
    (u'tier', 1),
    (u'who', u'tests-mozilla-release-lion-debug-unittest')
    (u'who', u'tests-mozilla-release-lion-debug-unittest'),
]


@@ -146,8 +142,7 @@ def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fie
    The field of ``last_modified`` is auto-generated, so just skipping that
    to make this test easy.
    """
    url = reverse("jobs-list",
                  kwargs={"project": test_repository.name})
    url = reverse("jobs-list", kwargs={"project": test_repository.name})
    final_url = url + "?{}={}".format(fieldname, expected)
    resp = client.get(final_url)
    assert resp.status_code == 200

@@ -159,11 +154,12 @@ def test_job_list_in_filter(client, eleven_jobs_stored, test_repository):
    """
    test retrieving a job list with a querystring filter.
    """
    url = reverse("jobs-list",
                  kwargs={"project": test_repository.name})
    final_url = url + ("?job_guid__in="
                       "f1c75261017c7c5ce3000931dce4c442fe0a1297,"
                       "9abb6f7d54a49d763c584926377f09835c5e1a32")
    url = reverse("jobs-list", kwargs={"project": test_repository.name})
    final_url = url + (
        "?job_guid__in="
        "f1c75261017c7c5ce3000931dce4c442fe0a1297,"
        "9abb6f7d54a49d763c584926377f09835c5e1a32"
    )

    resp = client.get(final_url)
    assert resp.status_code == 200

@@ -176,23 +172,19 @@ def test_job_detail(client, test_job):
    endpoint.
    """
    resp = client.get(
        reverse("jobs-detail",
                kwargs={"project": test_job.repository.name,
                        "pk": test_job.id})
        reverse("jobs-detail", kwargs={"project": test_job.repository.name, "pk": test_job.id})
    )
    assert resp.status_code == 200
    assert isinstance(resp.json(), dict)
    assert resp.json()["id"] == test_job.id

    resp = client.get(
        reverse("jobs-detail",
                kwargs={"project": test_job.repository.name,
                        "pk": test_job.id})
        reverse("jobs-detail", kwargs={"project": test_job.repository.name, "pk": test_job.id})
    )
    assert resp.status_code == 200
    assert resp.json()["taskcluster_metadata"] == {
        "task_id": 'V3SVuxO8TFy37En_6HcXLs',
        "retry_id": 0
        "retry_id": 0,
    }


@@ -201,8 +193,7 @@ def test_job_detail_bad_project(client, transactional_db):
    test retrieving a single job from the jobs-detail
    endpoint.
    """
    badurl = reverse("jobs-detail",
                     kwargs={"project": "badproject", "pk": 1})
    badurl = reverse("jobs-detail", kwargs={"project": "badproject", "pk": 1})
    resp = client.get(badurl)
    assert resp.status_code == 404

@@ -213,36 +204,37 @@ def test_job_detail_not_found(client, test_repository):
    endpoint.
    """
    resp = client.get(
        reverse("jobs-detail",
                kwargs={"project": test_repository.name, "pk": -32767}),
        reverse("jobs-detail", kwargs={"project": test_repository.name, "pk": -32767}),
    )
    assert resp.status_code == 404


def test_text_log_steps_and_errors(client, test_job):

    TextLogStep.objects.create(job=test_job,
                               name='step1',
                               started=datetime.datetime.utcfromtimestamp(0),
                               finished=datetime.datetime.utcfromtimestamp(100),
                               started_line_number=1,
                               finished_line_number=100,
                               result=TextLogStep.SUCCESS)
    step2 = TextLogStep.objects.create(job=test_job,
                                       name='step2',
                                       started=datetime.datetime.utcfromtimestamp(101),
                                       finished=datetime.datetime.utcfromtimestamp(200),
                                       started_line_number=101,
                                       finished_line_number=200,
                                       result=TextLogStep.TEST_FAILED)
    TextLogError.objects.create(step=step2, line='failure 1',
                                line_number=101)
    TextLogError.objects.create(step=step2, line='failure 2',
                                line_number=102)
    TextLogStep.objects.create(
        job=test_job,
        name='step1',
        started=datetime.datetime.utcfromtimestamp(0),
        finished=datetime.datetime.utcfromtimestamp(100),
        started_line_number=1,
        finished_line_number=100,
        result=TextLogStep.SUCCESS,
    )
    step2 = TextLogStep.objects.create(
        job=test_job,
        name='step2',
        started=datetime.datetime.utcfromtimestamp(101),
        finished=datetime.datetime.utcfromtimestamp(200),
        started_line_number=101,
        finished_line_number=200,
        result=TextLogStep.TEST_FAILED,
    )
    TextLogError.objects.create(step=step2, line='failure 1', line_number=101)
    TextLogError.objects.create(step=step2, line='failure 2', line_number=102)
    resp = client.get(
        reverse("jobs-text-log-steps",
                kwargs={"project": test_job.repository.name,
                        "pk": test_job.id})
        reverse(
            "jobs-text-log-steps", kwargs={"project": test_job.repository.name, "pk": test_job.id}
        )
    )
    assert resp.status_code == 200
    assert resp.json() == [

@@ -254,7 +246,7 @@ def test_text_log_steps_and_errors(client, test_job):
            'name': 'step1',
            'result': 'success',
            'started': '1970-01-01T00:00:00',
            'started_line_number': 1
            'started_line_number': 1,
        },
        {
            'errors': [

@@ -265,11 +257,11 @@ def test_text_log_steps_and_errors(client, test_job):
                    'bug_suggestions': {
                        'search': 'failure 1',
                        'search_terms': ['failure 1'],
                        'bugs': {'open_recent': [], 'all_others': []}
                        'bugs': {'open_recent': [], 'all_others': []},
                    },
                    'metadata': None,
                    'matches': [],
                    'classified_failures': []
                    'classified_failures': [],
                },
                {
                    'id': 2,

@@ -278,12 +270,12 @@ def test_text_log_steps_and_errors(client, test_job):
                    'bug_suggestions': {
                        'search': 'failure 2',
                        'search_terms': ['failure 2'],
                        'bugs': {'open_recent': [], 'all_others': []}
                        'bugs': {'open_recent': [], 'all_others': []},
                    },
                    'metadata': None,
                    'matches': [],
                    'classified_failures': []
                }
                    'classified_failures': [],
                },
            ],
            'finished': '1970-01-01T00:03:20',
            'finished_line_number': 200,

@@ -291,35 +283,37 @@ def test_text_log_steps_and_errors(client, test_job):
            'name': 'step2',
            'result': 'testfailed',
            'started': '1970-01-01T00:01:41',
            'started_line_number': 101
        }
            'started_line_number': 101,
        },
    ]


def test_text_log_errors(client, test_job):

    TextLogStep.objects.create(job=test_job,
                               name='step1',
                               started=datetime.datetime.utcfromtimestamp(0),
                               finished=datetime.datetime.utcfromtimestamp(100),
                               started_line_number=1,
                               finished_line_number=100,
                               result=TextLogStep.SUCCESS)
    step2 = TextLogStep.objects.create(job=test_job,
                                       name='step2',
                                       started=datetime.datetime.utcfromtimestamp(101),
                                       finished=datetime.datetime.utcfromtimestamp(200),
                                       started_line_number=101,
                                       finished_line_number=200,
                                       result=TextLogStep.TEST_FAILED)
    TextLogError.objects.create(step=step2, line='failure 1',
                                line_number=101)
    TextLogError.objects.create(step=step2, line='failure 2',
                                line_number=102)
    TextLogStep.objects.create(
        job=test_job,
        name='step1',
        started=datetime.datetime.utcfromtimestamp(0),
        finished=datetime.datetime.utcfromtimestamp(100),
        started_line_number=1,
        finished_line_number=100,
        result=TextLogStep.SUCCESS,
    )
    step2 = TextLogStep.objects.create(
        job=test_job,
        name='step2',
        started=datetime.datetime.utcfromtimestamp(101),
        finished=datetime.datetime.utcfromtimestamp(200),
        started_line_number=101,
        finished_line_number=200,
        result=TextLogStep.TEST_FAILED,
    )
    TextLogError.objects.create(step=step2, line='failure 1', line_number=101)
    TextLogError.objects.create(step=step2, line='failure 2', line_number=102)
    resp = client.get(
        reverse("jobs-text-log-errors",
                kwargs={"project": test_job.repository.name,
                        "pk": test_job.id})
        reverse(
            "jobs-text-log-errors", kwargs={"project": test_job.repository.name, "pk": test_job.id}
        )
    )
    assert resp.status_code == 200
    assert resp.json() == [

@@ -330,11 +324,11 @@ def test_text_log_errors(client, test_job):
            'bug_suggestions': {
                'search': 'failure 1',
                'search_terms': ['failure 1'],
                'bugs': {'open_recent': [], 'all_others': []}
                'bugs': {'open_recent': [], 'all_others': []},
            },
            'metadata': None,
            'matches': [],
            'classified_failures': []
            'classified_failures': [],
        },
        {
            'id': 2,

@@ -343,32 +337,29 @@ def test_text_log_errors(client, test_job):
            'bug_suggestions': {
                'search': 'failure 2',
                'search_terms': ['failure 2'],
                'bugs': {'open_recent': [], 'all_others': []}
                'bugs': {'open_recent': [], 'all_others': []},
            },
            'metadata': None,
            'matches': [],
            'classified_failures': []
        }
            'classified_failures': [],
        },
    ]


@pytest.mark.parametrize(('offset', 'count', 'expected_num'),
                         [(None, None, 3),
                          (None, 2, 2),
                          (1, None, 2),
                          (0, 1, 1),
                          (2, 10, 1)])
def test_list_similar_jobs(client, eleven_jobs_stored,
                           offset, count, expected_num):
@pytest.mark.parametrize(
    ('offset', 'count', 'expected_num'),
    [(None, None, 3), (None, 2, 2), (1, None, 2), (0, 1, 1), (2, 10, 1)],
)
def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_num):
    """
    test retrieving similar jobs
    """
    job = Job.objects.get(id=1)

    url = reverse("jobs-similar-jobs",
                  kwargs={"project": job.repository.name, "pk": job.id})
    params = '&'.join(['{}={}'.format(k, v) for k, v in
                       [('offset', offset), ('count', count)] if v])
    url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
    params = '&'.join(
        ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
    )
    if params:
        url += '?{}'.format(params)
    resp = client.get(url)

@@ -384,22 +375,26 @@ def test_list_similar_jobs(client, eleven_jobs_stored,
    assert len(similar_jobs['results']) == expected_num


@pytest.mark.parametrize('lm_key,lm_value,exp_status, exp_job_count', [
    ("last_modified__gt", "2016-07-18T22:16:58.000", 200, 8),
    ("last_modified__lt", "2016-07-18T22:16:58.000", 200, 3),
    ("last_modified__gt", "-Infinity", HTTP_400_BAD_REQUEST, 0),
    ("last_modified__gt", "whatever", HTTP_400_BAD_REQUEST, 0),
])
def test_last_modified(client, eleven_jobs_stored, test_repository,
                       lm_key, lm_value, exp_status, exp_job_count):
@pytest.mark.parametrize(
    'lm_key,lm_value,exp_status, exp_job_count',
    [
        ("last_modified__gt", "2016-07-18T22:16:58.000", 200, 8),
        ("last_modified__lt", "2016-07-18T22:16:58.000", 200, 3),
        ("last_modified__gt", "-Infinity", HTTP_400_BAD_REQUEST, 0),
        ("last_modified__gt", "whatever", HTTP_400_BAD_REQUEST, 0),
    ],
)
def test_last_modified(
    client, eleven_jobs_stored, test_repository, lm_key, lm_value, exp_status, exp_job_count
):
    try:
        param_date = parser.parse(lm_value)
        newer_date = param_date - datetime.timedelta(minutes=10)

        # modify job last_modified for 3 jobs
        Job.objects.filter(
            id__in=[j.id for j in Job.objects.all()[:3]]).update(
                last_modified=newer_date)
        Job.objects.filter(id__in=[j.id for j in Job.objects.all()[:3]]).update(
            last_modified=newer_date
        )
    except ValueError:
        # no problem. these params are the wrong
        pass
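
The parametrize rewrites above follow the same call-formatting rules as any
other call: the decorator's argument list is exploded with a trailing comma,
while each parameter tuple stays on one line when it fits. A self-contained
sketch of the post-black shape (hypothetical test, not taken from this diff):

import pytest


@pytest.mark.parametrize(
    ('offset', 'count', 'expected_num'),
    [(None, None, 10), (None, 5, 5), (5, None, 6), (0, 5, 5), (10, 10, 1)],
)
def test_windowing(offset, count, expected_num):
    # Page a ten-element list the way an offset/count querystring would.
    items = list(range(10))
    start = offset or 0
    window = items[start : start + (count or len(items))]
    assert len(window) <= expected_num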

@@ -3,8 +3,7 @@ import json
import pytest
from django.urls import reverse

from treeherder.model.models import (Job,
                                     JobNote)
from treeherder.model.models import Job, JobNote


def test_note_list(client, test_job_with_notes):

@@ -12,22 +11,23 @@ def test_note_list(client, test_job_with_notes):
    test retrieving a list of notes from the note-list endpoint
    """
    resp = client.get(
        reverse("note-list", kwargs={
            "project": test_job_with_notes.repository.name
        }),
        {"job_id": test_job_with_notes.id}
        reverse("note-list", kwargs={"project": test_job_with_notes.repository.name}),
        {"job_id": test_job_with_notes.id},
    )

    assert resp.status_code == 200
    assert isinstance(resp.json(), list)
    assert resp.json() == [{
        "id": note.id,
        "job_id": note.job.id,
        "failure_classification_id": note.failure_classification.id,
        "who": note.user.email,
        "created": note.created.isoformat(),
        "text": note.text
    } for note in JobNote.objects.filter(job=test_job_with_notes)]
    assert resp.json() == [
        {
            "id": note.id,
            "job_id": note.job.id,
            "failure_classification_id": note.failure_classification.id,
            "who": note.user.email,
            "created": note.created.isoformat(),
            "text": note.text,
        }
        for note in JobNote.objects.filter(job=test_job_with_notes)
    ]


def test_note_detail(client, test_job_with_notes):

@@ -38,11 +38,7 @@ def test_note_detail(client, test_job_with_notes):
    note = JobNote.objects.get(id=1)

    resp = client.get(
        reverse("note-detail",
                kwargs={
                    "project": test_job_with_notes.repository.name,
                    "pk": 1
                })
        reverse("note-detail", kwargs={"project": test_job_with_notes.repository.name, "pk": 1})
    )

    assert resp.status_code == 200

@@ -53,7 +49,7 @@ def test_note_detail(client, test_job_with_notes):
        "failure_classification_id": 2,
        "who": note.user.email,
        "created": note.created.isoformat(),
        "text": "you look like a man-o-lantern"
        "text": "you look like a man-o-lantern",
    }


@@ -63,8 +59,7 @@ def test_note_detail_not_found(client, test_repository):
    endpoint.
    """
    resp = client.get(
        reverse("note-detail",
                kwargs={"project": test_repository.name, "pk": -32767}),
        reverse("note-detail", kwargs={"project": test_repository.name, "pk": -32767}),
    )
    assert resp.status_code == 404

@@ -74,10 +69,7 @@ def test_note_detail_bad_project(client, test_repository):
    test retrieving a HTTP 404 from the note-detail
    endpoint.
    """
    resp = client.get(
        reverse("note-detail",
                kwargs={"project": "foo", "pk": -32767}),
    )
    resp = client.get(reverse("note-detail", kwargs={"project": "foo", "pk": -32767}),)
    assert resp.status_code == 404


@@ -95,7 +87,7 @@ def test_create_note(client, test_job, test_user, test_no_auth):
            "job_id": test_job.id,
            "failure_classification_id": 2,
            "who": test_user.email,
            "text": "you look like a man-o-lantern"
            "text": "you look like a man-o-lantern",
        },
    )


@@ -117,13 +109,13 @@ def test_create_note(client, test_job, test_user, test_no_auth):

    # verify that the job's last_modified field got updated
    old_last_modified = test_job.last_modified
    assert old_last_modified < Job.objects.values_list(
        'last_modified', flat=True).get(id=test_job.id)
    assert old_last_modified < Job.objects.values_list('last_modified', flat=True).get(
        id=test_job.id
    )


@pytest.mark.parametrize('test_no_auth', [True, False])
def test_delete_note(client, test_job_with_notes, test_repository,
                     test_sheriff, test_no_auth):
def test_delete_note(client, test_job_with_notes, test_repository, test_sheriff, test_no_auth):
    """
    test deleting a single note via endpoint
    """

@@ -133,8 +125,9 @@ def test_delete_note(client, test_job_with_notes, test_repository,
    notes_count = JobNote.objects.count()

    resp = client.delete(
        reverse("note-detail", kwargs={"project": test_repository.name,
                                       "pk": test_job_with_notes.id}),
        reverse(
            "note-detail", kwargs={"project": test_repository.name, "pk": test_job_with_notes.id}
        ),
    )
    new_notes_count = JobNote.objects.count()

@@ -12,5 +12,5 @@ def test_option_collection_list(client, sample_option_collections):
    assert len(response) == 2
    assert response == [
        {'option_collection_hash': 'option_hash1', 'options': [{'name': 'opt1'}]},
        {'option_collection_hash': 'option_hash2', 'options': [{'name': 'opt2'}]}
        {'option_collection_hash': 'option_hash2', 'options': [{'name': 'opt2'}]},
    ]

@@ -4,9 +4,7 @@ import pytest
from django.urls import reverse
from first import first

from treeherder.perf.models import (PerformanceAlert,
                                    PerformanceAlertSummary,
                                    PerformanceFramework)
from treeherder.perf.models import PerformanceAlert, PerformanceAlertSummary, PerformanceFramework


def test_alerts_get(client, test_repository, test_perf_alert):

@@ -17,86 +15,97 @@ def test_alerts_get(client, test_repository, test_perf_alert):
    assert resp.json()['next'] is None
    assert resp.json()['previous'] is None
    assert len(resp.json()['results']) == 1
    assert set(resp.json()['results'][0].keys()) == set([
        'amount_pct',
        'amount_abs',
        'id',
        'is_regression',
        'starred',
        'manually_created',
        'new_value',
        'prev_value',
        'related_summary_id',
        'series_signature',
        'summary_id',
        'status',
        't_value',
        'classifier',
        'classifier_email',
        'backfill_record'
    ])
    assert set(resp.json()['results'][0].keys()) == set(
        [
            'amount_pct',
            'amount_abs',
            'id',
            'is_regression',
            'starred',
            'manually_created',
            'new_value',
            'prev_value',
            'related_summary_id',
            'series_signature',
            'summary_id',
            'status',
            't_value',
            'classifier',
            'classifier_email',
            'backfill_record',
        ]
    )
    assert resp.json()['results'][0]['related_summary_id'] is None


def test_alerts_put(client, push_stored, test_repository,
                    test_perf_alert, test_perf_alert_summary_2, test_user,
                    test_sheriff):
def test_alerts_put(
    client,
    push_stored,
    test_repository,
    test_perf_alert,
    test_perf_alert_summary_2,
    test_user,
    test_sheriff,
):
    resp = client.get(reverse('performance-alerts-list'))
    assert resp.status_code == 200
    assert resp.json()['results'][0]['related_summary_id'] is None

    # verify that we fail if not authenticated
    resp = client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': 2,
        'status': PerformanceAlert.DOWNSTREAM
    })
    resp = client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
    )
    assert resp.status_code == 403
    assert PerformanceAlert.objects.get(id=1).related_summary_id is None

    # verify that we fail if authenticated, but not staff
    client.force_authenticate(user=test_user)
    resp = client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': 2,
        'status': PerformanceAlert.DOWNSTREAM
    })
    resp = client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
    )
    assert resp.status_code == 403
    assert PerformanceAlert.objects.get(id=1).related_summary_id is None

    # verify that we succeed if authenticated + staff
    client.force_authenticate(user=test_sheriff)
    resp = client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': 2,
        'status': PerformanceAlert.DOWNSTREAM
    })
    resp = client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
    )
    assert resp.status_code == 200
    assert PerformanceAlert.objects.get(id=1).related_summary_id == 2
    assert PerformanceAlert.objects.get(id=1).classifier == test_sheriff

    # verify that we can unset it too
    resp = client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': None,
        'status': PerformanceAlert.UNTRIAGED
    })
    resp = client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': None, 'status': PerformanceAlert.UNTRIAGED},
    )
    assert resp.status_code == 200
    assert PerformanceAlert.objects.get(id=1).related_summary_id is None


def test_reassign_different_repository(authorized_sheriff_client,
                                       push_stored,
                                       test_repository, test_repository_2,
                                       test_perf_alert,
                                       test_perf_alert_summary_2,
                                       test_sheriff):
def test_reassign_different_repository(
    authorized_sheriff_client,
    push_stored,
    test_repository,
    test_repository_2,
    test_perf_alert,
    test_perf_alert_summary_2,
    test_sheriff,
):
    # verify that we can't reassign to another performance alert summary
    # with a different repository unless the new status is downstream
    test_perf_alert_summary_2.repository = test_repository_2
    test_perf_alert_summary_2.save()

    # reassign to summary with different repository, should fail
    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': test_perf_alert_summary_2.id,
        'status': PerformanceAlert.REASSIGNED
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': test_perf_alert_summary_2.id, 'status': PerformanceAlert.REASSIGNED},
    )
    assert resp.status_code == 400
    test_perf_alert.refresh_from_db()
    assert test_perf_alert.related_summary_id is None

@@ -104,32 +113,34 @@ def test_reassign_different_repository(authorized_sheriff_client,

    # mark downstream of summary with different repository,
    # should succeed
    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': test_perf_alert_summary_2.id,
        'status': PerformanceAlert.DOWNSTREAM
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': test_perf_alert_summary_2.id, 'status': PerformanceAlert.DOWNSTREAM},
    )
    assert resp.status_code == 200
    test_perf_alert.refresh_from_db()
    assert test_perf_alert.related_summary_id == test_perf_alert_summary_2.id
    assert test_perf_alert.classifier == test_sheriff


def test_reassign_different_framework(authorized_sheriff_client,
                                      push_stored,
                                      test_repository, test_repository_2,
                                      test_perf_alert,
                                      test_perf_alert_summary_2):
def test_reassign_different_framework(
    authorized_sheriff_client,
    push_stored,
    test_repository,
    test_repository_2,
    test_perf_alert,
    test_perf_alert_summary_2,
):
    # try to assign to an alert with a different framework,
    # should fail
    framework_2 = PerformanceFramework.objects.create(
        name='test_talos_2', enabled=True)
    framework_2 = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
    test_perf_alert_summary_2.framework = framework_2
    test_perf_alert_summary_2.save()

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
        'related_summary_id': test_perf_alert_summary_2.id,
        'status': PerformanceAlert.REASSIGNED
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/',
        {'related_summary_id': test_perf_alert_summary_2.id, 'status': PerformanceAlert.REASSIGNED},
    )
    assert resp.status_code == 400
    test_perf_alert.refresh_from_db()
    assert test_perf_alert.related_summary_id is None

@@ -140,31 +151,26 @@ def test_reassign_different_framework(authorized_sheriff_client,
def alert_create_post_blob(test_perf_alert_summary, test_perf_signature):
    # this blob should be sufficient to create a new alert (assuming
    # the user of this API is authorized to do so!)
    return {
        'summary_id': test_perf_alert_summary.id,
        'signature_id': test_perf_signature.id
    }
    return {'summary_id': test_perf_alert_summary.id, 'signature_id': test_perf_signature.id}


def test_alerts_post(client, alert_create_post_blob,
                     test_user, test_sheriff, generate_enough_perf_datum):
def test_alerts_post(
    client, alert_create_post_blob, test_user, test_sheriff, generate_enough_perf_datum
):

    # verify that we fail if not authenticated
    resp = client.post(reverse('performance-alerts-list'),
                       alert_create_post_blob)
    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
    assert resp.status_code == 403

    # verify that we fail if authenticated, but not staff
    client.force_authenticate(user=test_user)
    resp = client.post(reverse('performance-alerts-list'),
                       alert_create_post_blob)
    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
    assert resp.status_code == 403
    assert PerformanceAlert.objects.count() == 0

    # verify that we succeed if staff + authenticated
    client.force_authenticate(user=test_sheriff)
    resp = client.post(reverse('performance-alerts-list'),
                       alert_create_post_blob)
    resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
    assert resp.status_code == 200
    assert PerformanceAlert.objects.count() == 1


@@ -179,31 +185,33 @@ def test_alerts_post(client, alert_create_post_blob,
    assert alert.summary.id == 1


def test_alerts_post_insufficient_data(authorized_sheriff_client,
                                       test_repository,
                                       test_perf_alert_summary,
                                       test_perf_signature,
                                       alert_create_post_blob):
def test_alerts_post_insufficient_data(
    authorized_sheriff_client,
    test_repository,
    test_perf_alert_summary,
    test_perf_signature,
    alert_create_post_blob,
):
    # we should not succeed if insufficient data is passed through
    for removed_key in ['summary_id', 'signature_id']:
        new_post_blob = copy.copy(alert_create_post_blob)
        del new_post_blob[removed_key]

        resp = authorized_sheriff_client.post(reverse('performance-alerts-list'),
                                              new_post_blob)
        resp = authorized_sheriff_client.post(reverse('performance-alerts-list'), new_post_blob)
        assert resp.status_code == 400
        assert PerformanceAlert.objects.count() == 0


@pytest.mark.xfail
def test_nudge_alert_towards_conflicting_one(authorized_sheriff_client,
                                             test_perf_alert,
                                             test_conflicting_perf_alert):
def test_nudge_alert_towards_conflicting_one(
    authorized_sheriff_client, test_perf_alert, test_conflicting_perf_alert
):
    assert test_conflicting_perf_alert.first_triaged is None
    old_conflicting_update = test_conflicting_perf_alert.last_updated

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/',
                                         {'prev_push_id': 2, 'push_id': 3})
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/', {'prev_push_id': 2, 'push_id': 3}
    )
    assert resp.status_code == 200
    test_conflicting_perf_alert.refresh_from_db()

@@ -218,20 +226,20 @@ def test_nudge_alert_towards_conflicting_one(authorized_sheriff_client,


@pytest.mark.xfail
@pytest.mark.parametrize("perf_datum_id, towards_push_ids",
                         [(3, {'prev_push_id': 1, 'push_id': 2}),
                          (2, {'prev_push_id': 2, 'push_id': 3})])
def test_nudge_alert_to_changeset_without_alert_summary(authorized_sheriff_client,
                                                        test_perf_alert,
                                                        test_perf_data,
                                                        perf_datum_id,
                                                        towards_push_ids):
    link_alert_summary_in_perf_data(test_perf_data, test_perf_alert,
                                    perf_datum_id)
@pytest.mark.parametrize(
    "perf_datum_id, towards_push_ids",
    [(3, {'prev_push_id': 1, 'push_id': 2}), (2, {'prev_push_id': 2, 'push_id': 3})],
)
def test_nudge_alert_to_changeset_without_alert_summary(
    authorized_sheriff_client, test_perf_alert, test_perf_data, perf_datum_id, towards_push_ids
):
    link_alert_summary_in_perf_data(test_perf_data, test_perf_alert, perf_datum_id)

    old_alert_summary_id = test_perf_alert.summary.id

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', towards_push_ids)
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/', towards_push_ids
    )

    assert resp.status_code == 200

@@ -252,28 +260,29 @@ def test_nudge_alert_to_changeset_without_alert_summary(authorized_sheriff_clien


@pytest.mark.xfail
@pytest.mark.parametrize("perf_datum_ids, alert_id_to_move, towards_push_ids",
                         [((2, 3), 2, {'push_id': 2, 'prev_push_id': 1}),
                          (None, 1, {'push_id': 3, 'prev_push_id': 2})])
def test_nudge_alert_to_changeset_with_an_alert_summary(authorized_sheriff_client,
                                                        test_perf_alert,
                                                        test_perf_alert_2,
                                                        test_perf_alert_summary,
                                                        test_perf_alert_summary_2,
                                                        test_perf_data,
                                                        perf_datum_ids,
                                                        alert_id_to_move,
                                                        towards_push_ids):
@pytest.mark.parametrize(
    "perf_datum_ids, alert_id_to_move, towards_push_ids",
    [((2, 3), 2, {'push_id': 2, 'prev_push_id': 1}), (None, 1, {'push_id': 3, 'prev_push_id': 2})],
)
def test_nudge_alert_to_changeset_with_an_alert_summary(
    authorized_sheriff_client,
    test_perf_alert,
    test_perf_alert_2,
    test_perf_alert_summary,
    test_perf_alert_summary_2,
    test_perf_data,
    perf_datum_ids,
    alert_id_to_move,
    towards_push_ids,
):
    """
    push_ids: 1 [2 summary_2+alert] -nudge-> [3 summary+alert_2] 4
                                    <-nudge-
    """
    alert_to_move, target_summary = test_perf_alert, test_perf_alert_summary_2
    if perf_datum_ids:
        link_alert_summary_in_perf_data(test_perf_data, test_perf_alert,
                                        perf_datum_ids[0])
        link_alert_summary_in_perf_data(test_perf_data, test_perf_alert_2,
                                        perf_datum_ids[1])
        link_alert_summary_in_perf_data(test_perf_data, test_perf_alert, perf_datum_ids[0])
        link_alert_summary_in_perf_data(test_perf_data, test_perf_alert_2, perf_datum_ids[1])
        associate_perf_data_to_alert(test_perf_data, test_perf_alert_2)
        alert_to_move, target_summary = test_perf_alert_2, test_perf_alert_summary
    old_alert_summary_id = alert_to_move.summary.id

@@ -287,7 +296,8 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(authorized_sheriff_clien
    assert target_summary.first_triaged is None

    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + str(alert_id_to_move) + '/', towards_push_ids)
        reverse('performance-alerts-list') + str(alert_id_to_move) + '/', towards_push_ids
    )

    assert resp.status_code == 200

@@ -323,22 +333,23 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(authorized_sheriff_clien


@pytest.mark.xfail
def test_nudge_left_alert_from_alert_summary_with_more_alerts(authorized_sheriff_client,
                                                              test_perf_alert,
                                                              test_perf_alert_2,
                                                              test_perf_alert_summary,
                                                              test_perf_alert_summary_2,
                                                              test_perf_data):
def test_nudge_left_alert_from_alert_summary_with_more_alerts(
    authorized_sheriff_client,
    test_perf_alert,
    test_perf_alert_2,
    test_perf_alert_summary,
    test_perf_alert_summary_2,
    test_perf_data,
):
    associate_perf_data_to_alert(test_perf_data, test_perf_alert_2)

    old_alert_summary_id = test_perf_alert_2.summary.id
    test_perf_alert.summary = test_perf_alert_summary_2
    test_perf_alert.save()

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '2/', {
        'push_id': 2,
        'prev_push_id': 1
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '2/', {'push_id': 2, 'prev_push_id': 1}
    )

    assert resp.status_code == 200

@@ -361,12 +372,14 @@ def test_nudge_left_alert_from_alert_summary_with_more_alerts(authorized_sheriff


@pytest.mark.xfail
def test_nudge_right_alert_from_alert_summary_with_more_alerts(authorized_sheriff_client,
                                                               test_perf_alert,
                                                               test_perf_alert_2,
                                                               test_perf_alert_summary,
                                                               test_perf_alert_summary_2,
                                                               test_perf_data):
def test_nudge_right_alert_from_alert_summary_with_more_alerts(
    authorized_sheriff_client,
    test_perf_alert,
    test_perf_alert_2,
    test_perf_alert_summary,
    test_perf_alert_summary_2,
    test_perf_data,
):
    """
    | push 2          |          | push 3          |
    | --------------- |          | --------------- |

@@ -382,10 +395,9 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(authorized_sherif
    test_perf_alert_2.summary = test_perf_alert_summary
    test_perf_alert_2.save()

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
        'push_id': 3,
        'prev_push_id': 2
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
    )

    assert resp.status_code == 200

@@ -401,23 +413,24 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(authorized_sherif
    # old alert summary still there
    assert PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).count() == 1
    # with other alert
    assert test_perf_alert_2 in PerformanceAlert.objects.filter(summary_id=old_alert_summary_id).all()
    assert (
        test_perf_alert_2 in PerformanceAlert.objects.filter(summary_id=old_alert_summary_id).all()
    )

    # prev alert_summary gets properly updated
    assert test_perf_alert_summary.alerts.count() == 1


@pytest.mark.xfail
def test_nudge_raises_exception_when_no_perf_data(authorized_sheriff_client,
                                                  test_perf_alert,
                                                  test_perf_alert_summary):
def test_nudge_raises_exception_when_no_perf_data(
    authorized_sheriff_client, test_perf_alert, test_perf_alert_summary
):
    initial_summary_count = PerformanceAlertSummary.objects.all().count()
    initial_alert_count = PerformanceAlert.objects.all().count()

    resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
        'push_id': 3,
        'prev_push_id': 2
    })
    resp = authorized_sheriff_client.put(
        reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
    )

    assert resp.status_code == 400
    assert PerformanceAlertSummary.objects.all().count() == initial_summary_count
|
||||
|
@ -425,11 +438,9 @@ def test_nudge_raises_exception_when_no_perf_data(authorized_sheriff_client,
|
|||
|
||||
|
||||
@pytest.mark.xfail
|
||||
def test_nudge_recalculates_alert_properties(authorized_sheriff_client,
|
||||
test_perf_alert,
|
||||
test_perf_alert_summary,
|
||||
test_perf_data):
|
||||
|
||||
def test_nudge_recalculates_alert_properties(
|
||||
authorized_sheriff_client, test_perf_alert, test_perf_alert_summary, test_perf_data
|
||||
):
|
||||
def _get_alert_properties(test_perf_alert):
|
||||
prop_names = ['amount_pct', 'amount_abs', 'prev_value', 'new_value', 't_value']
|
||||
return [getattr(test_perf_alert, prop_name) for prop_name in prop_names]
|
||||
|
@ -440,10 +451,9 @@ def test_nudge_recalculates_alert_properties(authorized_sheriff_client,
|
|||
perf_datum.value = index * 10
|
||||
perf_datum.save()
|
||||
|
||||
resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/', {
|
||||
'push_id': 3,
|
||||
'prev_push_id': 2
|
||||
})
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert.refresh_from_db()
|
||||
|
||||
|
@ -451,17 +461,19 @@ def test_nudge_recalculates_alert_properties(authorized_sheriff_client,
|
|||
assert new_alert_properties == [400.0, 20.0, 5.0, 25.0, 20.0]
|
||||
|
||||
|
||||
def test_timestamps_on_alert_and_summaries_inside_code(test_perf_alert_summary,
|
||||
test_perf_signature,
|
||||
test_perf_signature_2):
|
||||
new_alert = PerformanceAlert.objects.create(summary=test_perf_alert_summary,
|
||||
series_signature=test_perf_signature,
|
||||
is_regression=True,
|
||||
amount_pct=10,
|
||||
amount_abs=10,
|
||||
prev_value=10,
|
||||
new_value=11,
|
||||
t_value=10)
|
||||
def test_timestamps_on_alert_and_summaries_inside_code(
|
||||
test_perf_alert_summary, test_perf_signature, test_perf_signature_2
|
||||
):
|
||||
new_alert = PerformanceAlert.objects.create(
|
||||
summary=test_perf_alert_summary,
|
||||
series_signature=test_perf_signature,
|
||||
is_regression=True,
|
||||
amount_pct=10,
|
||||
amount_abs=10,
|
||||
prev_value=10,
|
||||
new_value=11,
|
||||
t_value=10,
|
||||
)
|
||||
assert new_alert.created <= new_alert.last_updated
|
||||
assert new_alert.first_triaged is None
|
||||
|
||||
|
@ -490,12 +502,14 @@ def test_timestamps_on_alert_and_summaries_inside_code(test_perf_alert_summary,
|
|||
assert parent_summary.first_triaged is not None
|
||||
|
||||
|
||||
def test_timestamps_on_manual_created_alert_via_their_endpoints(authorized_sheriff_client, alert_create_post_blob,
|
||||
generate_enough_perf_datum):
|
||||
def test_timestamps_on_manual_created_alert_via_their_endpoints(
|
||||
authorized_sheriff_client, alert_create_post_blob, generate_enough_perf_datum
|
||||
):
|
||||
# created <= last_updated, created <= first_triaged
|
||||
# BUT manually_created is True
|
||||
resp = authorized_sheriff_client.post(reverse('performance-alerts-list'),
|
||||
alert_create_post_blob)
|
||||
resp = authorized_sheriff_client.post(
|
||||
reverse('performance-alerts-list'), alert_create_post_blob
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
|
||||
manual_alert_id = resp.json()['alert_id']
|
||||
|
@ -514,8 +528,9 @@ def test_alert_timestamps_via_endpoint(authorized_sheriff_client, test_sheriff,
|
|||
old_created = test_perf_alert.created
|
||||
old_last_updated = test_perf_alert.last_updated
|
||||
|
||||
resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/',
|
||||
{'starred': True})
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alerts-list') + '1/', {'starred': True}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert.refresh_from_db()
|
||||
|
||||
|
@ -530,8 +545,9 @@ def test_alert_timestamps_via_endpoint(authorized_sheriff_client, test_sheriff,
|
|||
# updating alert multiple times:
|
||||
# keeps first_triaged the same
|
||||
authorized_sheriff_client.force_authenticate(user=test_sheriff)
|
||||
resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/',
|
||||
{'status': PerformanceAlert.ACKNOWLEDGED})
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alerts-list') + '1/', {'status': PerformanceAlert.ACKNOWLEDGED}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert.refresh_from_db()
|
||||
|
||||
|
@ -540,8 +556,14 @@ def test_alert_timestamps_via_endpoint(authorized_sheriff_client, test_sheriff,
|
|||
|
||||
|
||||
@pytest.mark.parametrize('relation', [PerformanceAlert.DOWNSTREAM, PerformanceAlert.REASSIGNED])
|
||||
def test_related_alerts_timestamps_via_endpoint(authorized_sheriff_client, test_sheriff, test_perf_alert, relation,
|
||||
test_perf_alert_summary, test_perf_alert_summary_2):
|
||||
def test_related_alerts_timestamps_via_endpoint(
|
||||
authorized_sheriff_client,
|
||||
test_sheriff,
|
||||
test_perf_alert,
|
||||
relation,
|
||||
test_perf_alert_summary,
|
||||
test_perf_alert_summary_2,
|
||||
):
|
||||
# downstream/reassgin use case
|
||||
assert test_perf_alert.first_triaged is None
|
||||
assert test_perf_alert_summary.first_triaged is None
|
||||
|
@ -551,9 +573,10 @@ def test_related_alerts_timestamps_via_endpoint(authorized_sheriff_client, test_
|
|||
old_summary_last_updated = test_perf_alert_summary.last_updated
|
||||
old_summary_last_updated_2 = test_perf_alert_summary_2.last_updated
|
||||
|
||||
resp = authorized_sheriff_client.put(reverse('performance-alerts-list') + '1/',
|
||||
{'status': relation,
|
||||
'related_summary_id': test_perf_alert_summary_2.id})
|
||||
resp = authorized_sheriff_client.put(
|
||||
reverse('performance-alerts-list') + '1/',
|
||||
{'status': relation, 'related_summary_id': test_perf_alert_summary_2.id},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
test_perf_alert.refresh_from_db()
|
||||
test_perf_alert_summary.refresh_from_db()
|
||||
|
@ -569,12 +592,11 @@ def test_related_alerts_timestamps_via_endpoint(authorized_sheriff_client, test_
|
|||
|
||||
|
||||
# utils
|
||||
def link_alert_summary_in_perf_data(test_perf_data, test_perf_alert,
|
||||
perf_datum_id):
|
||||
def link_alert_summary_in_perf_data(test_perf_data, test_perf_alert, perf_datum_id):
|
||||
assert perf_datum_id > 0
|
||||
|
||||
perf_datum = first(test_perf_data, key=lambda tpd: tpd.id == perf_datum_id)
|
||||
prev_perf_datum = first(test_perf_data, key=lambda tpd: tpd.id == perf_datum_id-1)
|
||||
prev_perf_datum = first(test_perf_data, key=lambda tpd: tpd.id == perf_datum_id - 1)
|
||||
|
||||
# adjust relations
|
||||
alert_summary = test_perf_alert.summary
|
||||
|
@ -596,9 +618,18 @@ def dump_vars(alert_summaries, perf_data, alerts=None):
|
|||
from pprint import pprint
|
||||
|
||||
def dump_alert(alert):
|
||||
pprint('Alert(id={0.id}, summary_id={0.summary_id}, push_id={0.summary.push_id}, prev_push_id={0.summary.prev_push_id})'.format(alert))
|
||||
pprint(
|
||||
'Alert(id={0.id}, summary_id={0.summary_id}, push_id={0.summary.push_id}, prev_push_id={0.summary.prev_push_id})'.format(
|
||||
alert
|
||||
)
|
||||
)
|
||||
|
||||
for summary in alert_summaries:
|
||||
pprint('AlertSummary(id={0.id}, push_id={0.push_id}, prev_push_id={0.prev_push_id}) has following alerts: '.format(summary))
|
||||
pprint(
|
||||
'AlertSummary(id={0.id}, push_id={0.push_id}, prev_push_id={0.prev_push_id}) has following alerts: '.format(
|
||||
summary
|
||||
)
|
||||
)
|
||||
for alert in summary.alerts.all():
|
||||
dump_alert(alert)
|
||||
if alerts is not None:
|
||||
|
|
|
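The nudge tests in this file all drive the same endpoint interaction; as a reading aid, a minimal sketch of that call, assuming a DRF-style test client such as the authorized_sheriff_client fixture (the nudge_alert helper name is illustrative, not part of the commit):

    from django.urls import reverse

    def nudge_alert(client, alert_id, push_id, prev_push_id):
        # PUT new push boundaries onto an existing alert; the server then
        # re-links the alert to an alert summary covering that range.
        url = reverse('performance-alerts-list') + '{}/'.format(alert_id)
        return client.put(url, {'push_id': push_id, 'prev_push_id': prev_push_id})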
@@ -19,7 +19,7 @@ def test_repository_onhold(transactional_db):
         codebase="gecko",
         repository_group_id=1,
         description="",
-        performance_alerts_enabled=True
+        performance_alerts_enabled=True,
     )
     return r

@@ -31,7 +31,8 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
             repository=test_repository_onhold,
             revision='1234abcd{}'.format(i),
             author='foo@bar.com',
-            time=datetime.datetime.now())
+            time=datetime.datetime.now(),
+        )

     return PerformanceAlertSummary.objects.create(
         repository=test_repository_onhold,

@@ -39,12 +40,14 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
         prev_push_id=1,
         push_id=2,
         manually_created=False,
-        created=datetime.datetime.now())
+        created=datetime.datetime.now(),
+    )


 @pytest.fixture
 def test_perf_alert_onhold(test_perf_signature, test_perf_alert_summary_onhold):
     from treeherder.perf.models import PerformanceAlert

     return PerformanceAlert.objects.create(
         summary=test_perf_alert_summary_onhold,
         series_signature=test_perf_signature,

@@ -53,11 +56,11 @@ def test_perf_alert_onhold(test_perf_signature, test_perf_alert_summary_onhold):
         amount_abs=50.0,
         prev_value=100.0,
         new_value=150.0,
-        t_value=20.0)
+        t_value=20.0,
+    )


-def test_alert_summaries_get(client, test_perf_alert_summary,
-                             test_perf_alert):
+def test_alert_summaries_get(client, test_perf_alert_summary, test_perf_alert):
     # verify that we get the performance summary + alert on GET
     resp = client.get(reverse('performance-alert-summaries-list'))
     assert resp.status_code == 200

@@ -66,51 +69,60 @@ def test_alert_summaries_get(client, test_perf_alert_summary,
     assert resp.json()['next'] is None
     assert resp.json()['previous'] is None
     assert len(resp.json()['results']) == 1
-    assert set(resp.json()['results'][0].keys()) == set([
-        'alerts',
-        'bug_number',
-        'bug_updated',
-        'issue_tracker',
-        'notes',
-        'assignee_username',
-        'assignee_email',
-        'framework',
-        'id',
-        'created',
-        'prev_push_id',
-        'related_alerts',
-        'repository',
-        'push_id',
-        'status',
-        'revision',
-        'push_timestamp',
-        'prev_push_revision'
-    ])
+    assert set(resp.json()['results'][0].keys()) == set(
+        [
+            'alerts',
+            'bug_number',
+            'bug_updated',
+            'issue_tracker',
+            'notes',
+            'assignee_username',
+            'assignee_email',
+            'framework',
+            'id',
+            'created',
+            'prev_push_id',
+            'related_alerts',
+            'repository',
+            'push_id',
+            'status',
+            'revision',
+            'push_timestamp',
+            'prev_push_revision',
+        ]
+    )
     assert len(resp.json()['results'][0]['alerts']) == 1
-    assert set(resp.json()['results'][0]['alerts'][0].keys()) == set([
-        'id',
-        'status',
-        'series_signature',
-        'is_regression',
-        'starred',
-        'manually_created',
-        'prev_value',
-        'new_value',
-        't_value',
-        'amount_abs',
-        'amount_pct',
-        'summary_id',
-        'related_summary_id',
-        'classifier',
-        'classifier_email',
-        'backfill_record'
-    ])
+    assert set(resp.json()['results'][0]['alerts'][0].keys()) == set(
+        [
+            'id',
+            'status',
+            'series_signature',
+            'is_regression',
+            'starred',
+            'manually_created',
+            'prev_value',
+            'new_value',
+            't_value',
+            'amount_abs',
+            'amount_pct',
+            'summary_id',
+            'related_summary_id',
+            'classifier',
+            'classifier_email',
+            'backfill_record',
+        ]
+    )
     assert resp.json()['results'][0]['related_alerts'] == []


-def test_alert_summaries_get_onhold(client, test_perf_alert_summary,
-                                    test_perf_alert, test_perf_alert_summary_onhold,
-                                    test_perf_alert_onhold, test_repository_onhold):
+def test_alert_summaries_get_onhold(
+    client,
+    test_perf_alert_summary,
+    test_perf_alert,
+    test_perf_alert_summary_onhold,
+    test_perf_alert_onhold,
+    test_repository_onhold,
+):
     # verify that we get the performance summary + alert on GET
     resp = client.get(reverse('performance-alert-summaries-list'))
     assert resp.status_code == 200

@@ -119,91 +131,98 @@ def test_alert_summaries_get_onhold(client, test_perf_alert_summary,
     assert resp.json()['next'] is None
     assert resp.json()['previous'] is None
     assert len(resp.json()['results']) == 1
-    assert set(resp.json()['results'][0].keys()) == set([
-        'alerts',
-        'bug_number',
-        'bug_updated',
-        'issue_tracker',
-        'notes',
-        'assignee_username',
-        'assignee_email',
-        'framework',
-        'id',
-        'created',
-        'prev_push_id',
-        'related_alerts',
-        'repository',
-        'push_id',
-        'status',
-        'revision',
-        'push_timestamp',
-        'prev_push_revision'
-    ])
+    assert set(resp.json()['results'][0].keys()) == set(
+        [
+            'alerts',
+            'bug_number',
+            'bug_updated',
+            'issue_tracker',
+            'notes',
+            'assignee_username',
+            'assignee_email',
+            'framework',
+            'id',
+            'created',
+            'prev_push_id',
+            'related_alerts',
+            'repository',
+            'push_id',
+            'status',
+            'revision',
+            'push_timestamp',
+            'prev_push_revision',
+        ]
+    )
     assert len(resp.json()['results'][0]['alerts']) == 1
-    assert set(resp.json()['results'][0]['alerts'][0].keys()) == set([
-        'id',
-        'status',
-        'series_signature',
-        'is_regression',
-        'starred',
-        'manually_created',
-        'prev_value',
-        'new_value',
-        't_value',
-        'amount_abs',
-        'amount_pct',
-        'summary_id',
-        'related_summary_id',
-        'classifier',
-        'classifier_email',
-        'backfill_record'
-    ])
+    assert set(resp.json()['results'][0]['alerts'][0].keys()) == set(
+        [
+            'id',
+            'status',
+            'series_signature',
+            'is_regression',
+            'starred',
+            'manually_created',
+            'prev_value',
+            'new_value',
+            't_value',
+            'amount_abs',
+            'amount_pct',
+            'summary_id',
+            'related_summary_id',
+            'classifier',
+            'classifier_email',
+            'backfill_record',
+        ]
+    )
     assert resp.json()['results'][0]['related_alerts'] == []


-def test_alert_summaries_put(client, test_repository, test_perf_signature,
-                             test_perf_alert_summary, test_user, test_sheriff):
+def test_alert_summaries_put(
+    client, test_repository, test_perf_signature, test_perf_alert_summary, test_user, test_sheriff
+):
     # verify that we fail if not authenticated
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {
-        'status': 1
-    })
+    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.get(id=1).status == 0

     # verify that we fail if authenticated, but not staff
     client.force_authenticate(user=test_user)
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {
-        'status': 1
-    })
+    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
     assert resp.status_code == 403
     assert PerformanceAlertSummary.objects.get(id=1).status == 0

     # verify that we succeed if authenticated + staff
     client.force_authenticate(user=test_sheriff)
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {
-        'status': 1
-    })
+    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
     assert resp.status_code == 200
     assert PerformanceAlertSummary.objects.get(id=1).status == 1

     # verify we can set assignee
     client.force_authenticate(user=test_sheriff)
-    resp = client.put(reverse('performance-alert-summaries-list') + '1/', {
-        'assignee_username': test_user.username
-    })
+    resp = client.put(
+        reverse('performance-alert-summaries-list') + '1/',
+        {'assignee_username': test_user.username},
+    )
     assert resp.status_code == 200
     assert PerformanceAlertSummary.objects.get(id=1).assignee == test_user


-def test_alert_summary_post(client, test_repository, test_issue_tracker,
-                            push_stored, test_perf_signature, test_user, test_sheriff):
+def test_alert_summary_post(
+    client,
+    test_repository,
+    test_issue_tracker,
+    push_stored,
+    test_perf_signature,
+    test_user,
+    test_sheriff,
+):
     # this blob should be sufficient to create a new alert summary (assuming
     # the user of this API is authorized to do so!)
     post_blob = {
         'repository_id': test_repository.id,
         'framework_id': test_perf_signature.framework.id,
         'prev_push_id': 1,
-        'push_id': 2
+        'push_id': 2,
     }

     # verify that we fail if not authenticated

@@ -237,16 +256,17 @@ def test_alert_summary_post(client, test_repository, test_issue_tracker,
     assert PerformanceAlertSummary.objects.count() == 1


-@pytest.mark.parametrize('modification',
-                         [{'notes': 'human created notes'},
-                          {'bug_number': 123456, 'issue_tracker': 1}])
-def test_alert_summary_timestamps_via_endpoints(authorized_sheriff_client, test_perf_alert_summary, modification):
+@pytest.mark.parametrize(
+    'modification', [{'notes': 'human created notes'}, {'bug_number': 123456, 'issue_tracker': 1}]
+)
+def test_alert_summary_timestamps_via_endpoints(
+    authorized_sheriff_client, test_perf_alert_summary, modification
+):
     assert test_perf_alert_summary.first_triaged is None

     # when editing notes & linking bugs
     resp = authorized_sheriff_client.put(
-        reverse('performance-alert-summaries-list') + '1/',
-        modification
+        reverse('performance-alert-summaries-list') + '1/', modification
     )
     assert resp.status_code == 200
     test_perf_alert_summary.refresh_from_db()

@@ -256,15 +276,16 @@ def test_alert_summary_timestamps_via_endpoints(authorized_sheriff_client, test_
     assert test_perf_alert_summary.created < test_perf_alert_summary.last_updated


-def test_bug_number_and_timestamp_on_setting_value(authorized_sheriff_client, test_perf_alert_summary):
+def test_bug_number_and_timestamp_on_setting_value(
+    authorized_sheriff_client, test_perf_alert_summary
+):
     assert test_perf_alert_summary.first_triaged is None
     assert test_perf_alert_summary.bug_number is None
     assert test_perf_alert_summary.bug_updated is None

     # link a bug
     resp = authorized_sheriff_client.put(
-        reverse('performance-alert-summaries-list') + '1/',
-        {'bug_number': 123456}
+        reverse('performance-alert-summaries-list') + '1/', {'bug_number': 123456}
     )
     assert resp.status_code == 200
     test_perf_alert_summary.refresh_from_db()

@@ -274,7 +295,9 @@ def test_bug_number_and_timestamp_on_setting_value(authorized_sheriff_client, te
     assert test_perf_alert_summary.bug_updated is not None


-def test_bug_number_and_timestamp_on_overriding(authorized_sheriff_client, test_perf_alert_summary_with_bug):
+def test_bug_number_and_timestamp_on_overriding(
+    authorized_sheriff_client, test_perf_alert_summary_with_bug
+):
     assert test_perf_alert_summary_with_bug.bug_number == 123456
     assert test_perf_alert_summary_with_bug.bug_updated < datetime.datetime.now()

@@ -282,8 +305,7 @@ def test_bug_number_and_timestamp_on_overriding(authorized_sheriff_client, test_

     # update the existing bug number
     resp = authorized_sheriff_client.put(
-        reverse('performance-alert-summaries-list') + '1/',
-        {'bug_number': 987654}
+        reverse('performance-alert-summaries-list') + '1/', {'bug_number': 987654}
     )

     assert resp.status_code == 200

@@ -294,15 +316,15 @@ def test_bug_number_and_timestamp_on_overriding(authorized_sheriff_client, test_
     assert test_perf_alert_summary_with_bug.bug_updated > bug_linking_time


-def test_bug_number_and_timestamp_dont_update_from_other_modifications(authorized_sheriff_client,
-                                                                       test_perf_alert_summary):
+def test_bug_number_and_timestamp_dont_update_from_other_modifications(
+    authorized_sheriff_client, test_perf_alert_summary
+):
     assert test_perf_alert_summary.bug_number is None
     assert test_perf_alert_summary.bug_updated is None

     # link a bug
     resp = authorized_sheriff_client.put(
-        reverse('performance-alert-summaries-list') + '1/',
-        {'notes': 'human created notes'}
+        reverse('performance-alert-summaries-list') + '1/', {'notes': 'human created notes'}
     )
     assert resp.status_code == 200
     test_perf_alert_summary.refresh_from_db()
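test_alert_summaries_put above walks a three-step permission ladder (anonymous, authenticated non-staff, staff); a compressed sketch of that pattern, under the same assumption of a DRF test client with force_authenticate (the helper name is illustrative):

    from django.urls import reverse

    def assert_permission_ladder(client, test_user, test_sheriff):
        # Anonymous and non-staff writes are rejected; staff succeeds.
        url = reverse('performance-alert-summaries-list') + '1/'
        assert client.put(url, {'status': 1}).status_code == 403
        client.force_authenticate(user=test_user)
        assert client.put(url, {'status': 1}).status_code == 403
        client.force_authenticate(user=test_sheriff)
        assert client.put(url, {'status': 1}).status_code == 200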
@@ -1,7 +1,6 @@
 from django.urls import reverse

-from treeherder.perf.models import (PerformanceBugTemplate,
-                                    PerformanceFramework)
+from treeherder.perf.models import PerformanceBugTemplate, PerformanceFramework


 def test_perf_bug_template_api(client, test_perf_framework):

@@ -15,7 +14,7 @@ def test_perf_bug_template_api(client, test_perf_framework):
             'default_component': "dfcom{}".format(i),
             'default_product': "dfprod{}".format(i),
             'cc_list': "foo{}@bar.com".format(i),
-            'text': "my great text {}".format(i)
+            'text': "my great text {}".format(i),
         }
         PerformanceBugTemplate.objects.create(framework=framework, **dict)
         dict['framework'] = framework.id

@@ -27,7 +26,8 @@ def test_perf_bug_template_api(client, test_perf_framework):
     assert resp.json() == template_dicts

     # test that we can get just one (the usual case, probably)
-    resp = client.get(reverse('performance-bug-template-list') +
-                      '?framework={}'.format(test_perf_framework.id))
+    resp = client.get(
+        reverse('performance-bug-template-list') + '?framework={}'.format(test_perf_framework.id)
+    )
     assert resp.status_code == 200
     assert resp.json() == [template_dicts[0]]
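The filtered GET in the last hunk relies on a simple idiom: reverse() yields the list route, and filters ride along as a hand-built query string. A minimal sketch, assuming Django's test client and the URL name used above (the helper name is illustrative):

    from django.urls import reverse

    def get_templates_for_framework(client, framework_id):
        # Query-string filtering; the endpoint narrows results to one framework.
        url = reverse('performance-bug-template-list') + '?framework={}'.format(framework_id)
        return client.get(url)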
@@ -3,11 +3,8 @@ import datetime
 import pytest
 from django.urls import reverse

-from treeherder.model.models import (MachinePlatform,
-                                     Push)
-from treeherder.perf.models import (PerformanceDatum,
-                                    PerformanceFramework,
-                                    PerformanceSignature)
+from treeherder.model.models import MachinePlatform, Push
+from treeherder.perf.models import PerformanceDatum, PerformanceFramework, PerformanceSignature

 NOW = datetime.datetime.now()
 ONE_DAY_AGO = NOW - datetime.timedelta(days=1)

@@ -20,7 +17,7 @@ def summary_perf_signature(test_perf_signature):
     # summary performance signature don't have test value
     signature = PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
-        signature_hash=(40*'s'),
+        signature_hash=(40 * 's'),
         framework=test_perf_signature.framework,
         platform=test_perf_signature.platform,
         option_collection=test_perf_signature.option_collection,

@@ -28,7 +25,7 @@ def summary_perf_signature(test_perf_signature):
         test='',
         extra_options='e10s shell',
         has_subtests=True,
-        last_updated=datetime.datetime.now()
+        last_updated=datetime.datetime.now(),
     )
     test_perf_signature.parent_signature = signature
     test_perf_signature.save()

@@ -39,8 +36,7 @@ def summary_perf_signature(test_perf_signature):
 def test_perf_signature_same_hash_different_framework(test_perf_signature):
     # a new signature, same as the test_perf_signature in every
     # way, except it belongs to a different "framework"
-    new_framework = PerformanceFramework.objects.create(
-        name='test_talos_2', enabled=True)
+    new_framework = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
     new_signature = PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
         signature_hash=test_perf_signature.signature_hash,

@@ -50,16 +46,16 @@ def test_perf_signature_same_hash_different_framework(test_perf_signature):
         suite=test_perf_signature.suite,
         test=test_perf_signature.test,
         has_subtests=test_perf_signature.has_subtests,
-        last_updated=test_perf_signature.last_updated
+        last_updated=test_perf_signature.last_updated,
     )
     return new_signature


-def test_no_summary_performance_data(client, test_perf_signature,
-                                     test_repository):
+def test_no_summary_performance_data(client, test_perf_signature, test_repository):

-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={"project": test_repository.name}))
+    resp = client.get(
+        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+    )
     assert resp.status_code == 200
     assert resp.json() == {
         test_perf_signature.signature_hash: {

@@ -78,10 +74,12 @@ def test_no_summary_performance_data(client, test_perf_signature,


 def test_performance_platforms(client, test_perf_signature):
-    resp = client.get(reverse('performance-signatures-platforms-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }))
+    resp = client.get(
+        reverse(
+            'performance-signatures-platforms-list',
+            kwargs={"project": test_perf_signature.repository.name},
+        )
+    )
     assert resp.status_code == 200
     assert resp.json() == ['win7']

@@ -90,10 +88,13 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
     # check that we have no performance platform if the signatures are too old
     test_perf_signature.last_updated = datetime.datetime.utcfromtimestamp(0)
     test_perf_signature.save()
-    resp = client.get(reverse('performance-signatures-platforms-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }) + '?interval={}'.format(86400))
+    resp = client.get(
+        reverse(
+            'performance-signatures-platforms-list',
+            kwargs={"project": test_perf_signature.repository.name},
+        )
+        + '?interval={}'.format(86400)
+    )
     assert resp.status_code == 200
     assert resp.json() == []

@@ -101,10 +102,7 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
 def test_performance_platforms_framework_filtering(client, test_perf_signature):
     # check framework filtering
     framework2 = PerformanceFramework.objects.create(name='test_talos2', enabled=True)
-    platform2 = MachinePlatform.objects.create(
-        os_name='win',
-        platform='win7-a',
-        architecture='x86')
+    platform2 = MachinePlatform.objects.create(os_name='win', platform='win7-a', architecture='x86')
     PerformanceSignature.objects.create(
         repository=test_perf_signature.repository,
         signature_hash=test_perf_signature.signature_hash,

@@ -114,35 +112,43 @@ def test_performance_platforms_framework_filtering(client, test_perf_signature):
         suite=test_perf_signature.suite,
         test=test_perf_signature.test,
         has_subtests=test_perf_signature.has_subtests,
-        last_updated=test_perf_signature.last_updated)
+        last_updated=test_perf_signature.last_updated,
+    )

     # by default should return both
-    resp = client.get(reverse('performance-signatures-platforms-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }))
+    resp = client.get(
+        reverse(
+            'performance-signatures-platforms-list',
+            kwargs={"project": test_perf_signature.repository.name},
+        )
+    )
     assert resp.status_code == 200
     assert sorted(resp.json()) == ['win7', 'win7-a']

     # if we specify just one framework, should only return one
-    resp = client.get(reverse('performance-signatures-platforms-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }) + '?framework={}'.format(framework2.id))
+    resp = client.get(
+        reverse(
+            'performance-signatures-platforms-list',
+            kwargs={"project": test_perf_signature.repository.name},
+        )
+        + '?framework={}'.format(framework2.id)
+    )
     assert resp.status_code == 200
     assert resp.json() == ['win7-a']


-def test_summary_performance_data(client, test_repository,
-                                  summary_perf_signature,
-                                  test_perf_signature):
+def test_summary_performance_data(
+    client, test_repository, summary_perf_signature, test_perf_signature
+):
     summary_signature_hash = summary_perf_signature.signature_hash
-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={"project": test_repository.name}))
+    resp = client.get(
+        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+    )
     assert resp.status_code == 200

-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={"project": test_repository.name}))
+    resp = client.get(
+        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+    )
     assert resp.status_code == 200

     assert len(resp.data.keys()) == 2

@@ -154,7 +160,7 @@ def test_summary_performance_data(client, test_repository,
             'suite': signature.suite,
             'option_collection_hash': signature.option_collection.option_collection_hash,
             'framework_id': signature.framework_id,
-            'machine_platform': signature.platform.platform
+            'machine_platform': signature.platform.platform,
         }
         if signature.test:
             expected['test'] = signature.test

@@ -175,32 +181,40 @@ def test_summary_performance_data(client, test_repository,
         assert resp.data[signature.signature_hash] == expected


-def test_filter_signatures_by_framework(client, test_repository, test_perf_signature,
-                                        test_perf_signature_same_hash_different_framework):
+def test_filter_signatures_by_framework(
+    client, test_repository, test_perf_signature, test_perf_signature_same_hash_different_framework
+):
     signature2 = test_perf_signature_same_hash_different_framework

     # Filter by original framework
-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?framework=%s' % test_perf_signature.framework.id,
-                      )
+    resp = client.get(
+        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+        + '?framework=%s' % test_perf_signature.framework.id,
+    )
     assert resp.status_code == 200
     assert len(resp.data.keys()) == 1
-    assert resp.data[test_perf_signature.signature_hash]['framework_id'] == test_perf_signature.framework.id
+    assert (
+        resp.data[test_perf_signature.signature_hash]['framework_id']
+        == test_perf_signature.framework.id
+    )

     # Filter by new framework
-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?framework=%s' % signature2.framework.id,
-                      )
+    resp = client.get(
+        reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+        + '?framework=%s' % signature2.framework.id,
+    )
     assert resp.status_code == 200
     assert len(resp.data.keys()) == 1
     assert resp.data[signature2.signature_hash]['framework_id'] == signature2.framework.id


-def test_filter_data_by_framework(client, test_repository, test_perf_signature,
-                                  push_stored,
-                                  test_perf_signature_same_hash_different_framework):
+def test_filter_data_by_framework(
+    client,
+    test_repository,
+    test_perf_signature,
+    push_stored,
+    test_perf_signature_same_hash_different_framework,
+):
     signature2 = test_perf_signature_same_hash_different_framework
     push = Push.objects.get(id=1)
     for signature in [test_perf_signature, signature2]:

@@ -210,35 +224,40 @@ def test_filter_data_by_framework(client, test_repository, test_perf_signature,
             result_set_id=1,
             signature=signature,
             value=0.0,
-            push_timestamp=push.time)
+            push_timestamp=push.time,
+        )

     # No filtering, return two datapoints (this behaviour actually sucks,
     # but it's "by design" for now, see bug 1265709)
-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?signatures=' + test_perf_signature.signature_hash)
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name})
+        + '?signatures='
+        + test_perf_signature.signature_hash
+    )
     assert resp.status_code == 200
     datums = resp.data[test_perf_signature.signature_hash]
     assert len(datums) == 2
     assert set(datum['signature_id'] for datum in datums) == {1, 2}

     # Filtering by first framework
-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?signatures={}&framework={}'.format(
-                          test_perf_signature.signature_hash,
-                          test_perf_signature.framework.id))
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name})
+        + '?signatures={}&framework={}'.format(
+            test_perf_signature.signature_hash, test_perf_signature.framework.id
+        )
+    )
     assert resp.status_code == 200
     datums = resp.data[test_perf_signature.signature_hash]
     assert len(datums) == 1
     assert datums[0]['signature_id'] == 1

     # Filtering by second framework
-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?signatures={}&framework={}'.format(
-                          test_perf_signature.signature_hash,
-                          signature2.framework.id))
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name})
+        + '?signatures={}&framework={}'.format(
+            test_perf_signature.signature_hash, signature2.framework.id
+        )
+    )
     assert resp.status_code == 200
     datums = resp.data[test_perf_signature.signature_hash]
     assert len(datums) == 1

@@ -247,61 +266,70 @@ def test_filter_data_by_framework(client, test_repository, test_perf_signature,

 def test_filter_signatures_by_interval(client, test_perf_signature):
     # interval for the last 24 hours, only one signature exists last updated within that timeframe
-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }) + '?interval={}'.format(86400))
+    resp = client.get(
+        reverse(
+            'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+        )
+        + '?interval={}'.format(86400)
+    )
     assert resp.status_code == 200
     assert len(resp.json().keys()) == 1
     assert resp.json()[test_perf_signature.signature_hash]['id'] == 1


-@pytest.mark.parametrize('start_date, end_date, exp_count, exp_id', [
-    (SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1),
-    (THREE_DAYS_AGO, '', 1, 1),
-    (ONE_DAY_AGO, '', 0, 0)])
-def test_filter_signatures_by_range(client, test_perf_signature,
-                                    start_date, end_date, exp_count, exp_id):
+@pytest.mark.parametrize(
+    'start_date, end_date, exp_count, exp_id',
+    [(SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1), (THREE_DAYS_AGO, '', 1, 1), (ONE_DAY_AGO, '', 0, 0)],
+)
+def test_filter_signatures_by_range(
+    client, test_perf_signature, start_date, end_date, exp_count, exp_id
+):
     # set signature last updated to 3 days ago
     test_perf_signature.last_updated = THREE_DAYS_AGO
     test_perf_signature.save()

-    resp = client.get(reverse('performance-signatures-list',
-                              kwargs={
-                                  "project": test_perf_signature.repository.name
-                              }) + '?start_date={}&end_date={}'.format(start_date, end_date))
+    resp = client.get(
+        reverse(
+            'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+        )
+        + '?start_date={}&end_date={}'.format(start_date, end_date)
+    )
     assert resp.status_code == 200
     assert len(resp.json().keys()) == exp_count
     if exp_count != 0:
         assert resp.json()[test_perf_signature.signature_hash]['id'] == exp_id


-@pytest.mark.parametrize('interval, exp_datums_len, exp_push_ids', [
-    (86400, 1, [1]),
-    (86400 * 3, 2, [2, 1])])
-def test_filter_data_by_interval(client, test_repository, test_perf_signature,
-                                 interval, exp_datums_len, exp_push_ids):
+@pytest.mark.parametrize(
+    'interval, exp_datums_len, exp_push_ids', [(86400, 1, [1]), (86400 * 3, 2, [2, 1])]
+)
+def test_filter_data_by_interval(
+    client, test_repository, test_perf_signature, interval, exp_datums_len, exp_push_ids
+):
     # create some test data
-    for (i, timestamp) in enumerate([NOW, NOW - datetime.timedelta(days=2),
-                                     NOW - datetime.timedelta(days=7)]):
-        push = Push.objects.create(repository=test_repository,
-                                   revision='abcdefgh%s' % i,
-                                   author='foo@bar.com',
-                                   time=timestamp)
+    for (i, timestamp) in enumerate(
+        [NOW, NOW - datetime.timedelta(days=2), NOW - datetime.timedelta(days=7)]
+    ):
+        push = Push.objects.create(
+            repository=test_repository,
+            revision='abcdefgh%s' % i,
+            author='foo@bar.com',
+            time=timestamp,
+        )
         PerformanceDatum.objects.create(
             repository=test_perf_signature.repository,
             result_set_id=push.id,
             push=push,
             signature=test_perf_signature,
             value=i,
-            push_timestamp=timestamp)
+            push_timestamp=timestamp,
+        )

     # going back interval of 1 day, should find 1 item
-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?signature_id={}&interval={}'.format(
-                          test_perf_signature.id,
-                          interval))
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name})
+        + '?signature_id={}&interval={}'.format(test_perf_signature.id, interval)
+    )

     assert resp.status_code == 200
     datums = resp.data[test_perf_signature.signature_hash]

@@ -310,32 +338,38 @@ def test_filter_data_by_interval(client, test_repository, test_perf_signature,
         assert datums[x]['push_id'] == exp_push_ids[x]


-@pytest.mark.parametrize('start_date, end_date, exp_datums_len, exp_push_ids', [
-    (SEVEN_DAYS_AGO, THREE_DAYS_AGO, 1, [3]),
-    (THREE_DAYS_AGO, '', 2, [2, 1])])
-def test_filter_data_by_range(client, test_repository, test_perf_signature,
-                              start_date, end_date, exp_datums_len,
-                              exp_push_ids):
+@pytest.mark.parametrize(
+    'start_date, end_date, exp_datums_len, exp_push_ids',
+    [(SEVEN_DAYS_AGO, THREE_DAYS_AGO, 1, [3]), (THREE_DAYS_AGO, '', 2, [2, 1])],
+)
+def test_filter_data_by_range(
+    client, test_repository, test_perf_signature, start_date, end_date, exp_datums_len, exp_push_ids
+):
     # create some test data
-    for (i, timestamp) in enumerate([NOW, NOW - datetime.timedelta(days=2),
-                                     NOW - datetime.timedelta(days=5)]):
-        push = Push.objects.create(repository=test_repository,
-                                   revision='abcdefgh%s' % i,
-                                   author='foo@bar.com',
-                                   time=timestamp)
+    for (i, timestamp) in enumerate(
+        [NOW, NOW - datetime.timedelta(days=2), NOW - datetime.timedelta(days=5)]
+    ):
+        push = Push.objects.create(
+            repository=test_repository,
+            revision='abcdefgh%s' % i,
+            author='foo@bar.com',
+            time=timestamp,
+        )
         PerformanceDatum.objects.create(
             repository=test_perf_signature.repository,
             result_set_id=push.id,
             push=push,
             signature=test_perf_signature,
             value=i,
-            push_timestamp=timestamp)
+            push_timestamp=timestamp,
+        )

-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?signature_id={}&start_date={}&end_date={}'.format(
-                          test_perf_signature.id,
-                          start_date, end_date))
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name})
+        + '?signature_id={}&start_date={}&end_date={}'.format(
+            test_perf_signature.id, start_date, end_date
+        )
+    )

     assert resp.status_code == 200
     datums = resp.data[test_perf_signature.signature_hash]

@@ -345,23 +379,23 @@ def test_filter_data_by_range(client, test_repository, test_perf_signature,


 def test_job_ids_validity(client, test_repository):
-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?job_id=1')
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=1'
+    )
     assert resp.status_code == 200

-    resp = client.get(reverse('performance-data-list',
-                              kwargs={"project": test_repository.name}) +
-                      '?job_id=foo')
+    resp = client.get(
+        reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=foo'
+    )
    assert resp.status_code == 400


-def test_filter_data_by_signature(client, test_repository, test_perf_signature,
-                                  summary_perf_signature):
-    push = Push.objects.create(repository=test_repository,
-                               revision='abcdefghi',
-                               author='foo@bar.com',
-                               time=NOW)
+def test_filter_data_by_signature(
+    client, test_repository, test_perf_signature, summary_perf_signature
+):
+    push = Push.objects.create(
+        repository=test_repository, revision='abcdefghi', author='foo@bar.com', time=NOW
+    )
     for (i, signature) in enumerate([test_perf_signature, summary_perf_signature]):
         PerformanceDatum.objects.create(
             repository=signature.repository,

@@ -369,16 +403,20 @@ def test_filter_data_by_signature(client, test_repository, test_perf_signature,
             push=push,
             signature=signature,
             value=i,
-            push_timestamp=NOW)
+            push_timestamp=NOW,
+        )

     # test that we get the expected value for all different permutations of
     # passing in signature_id and signature hash
     for (i, signature) in enumerate([test_perf_signature, summary_perf_signature]):
-        for (param, value) in [('signatures', signature.signature_hash),
-                               ('signature_id', signature.id)]:
-            resp = client.get(reverse('performance-data-list',
-                                      kwargs={"project": test_repository.name}) +
-                              '?{}={}'.format(param, value))
+        for (param, value) in [
+            ('signatures', signature.signature_hash),
+            ('signature_id', signature.id),
+        ]:
+            resp = client.get(
+                reverse('performance-data-list', kwargs={"project": test_repository.name})
+                + '?{}={}'.format(param, value)
+            )
             assert resp.status_code == 200
             assert len(resp.data.keys()) == 1
             assert len(resp.data[signature.signature_hash]) == 1

@@ -389,31 +427,37 @@ def test_filter_data_by_signature(client, test_repository, test_perf_signature,
 def test_perf_summary(client, test_perf_signature, test_perf_data):

     query_params1 = '?repository={}&framework={}&interval=172800&no_subtests=true&revision={}'.format(
-        test_perf_signature.repository.name, test_perf_signature.framework_id, test_perf_data[0].push.revision)
+        test_perf_signature.repository.name,
+        test_perf_signature.framework_id,
+        test_perf_data[0].push.revision,
+    )

     query_params2 = '?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29'.format(
-        test_perf_signature.repository.name, test_perf_signature.framework_id)
+        test_perf_signature.repository.name, test_perf_signature.framework_id
+    )

-    expected = [{
-        'signature_id': test_perf_signature.id,
-        'framework_id': test_perf_signature.framework_id,
-        'signature_hash': test_perf_signature.signature_hash,
-        'platform': test_perf_signature.platform.platform,
-        'test': test_perf_signature.test,
-        'application': test_perf_signature.application,
-        'lower_is_better': test_perf_signature.lower_is_better,
-        'has_subtests': test_perf_signature.has_subtests,
-        'tags': test_perf_signature.tags,
-        'measurement_unit': test_perf_signature.measurement_unit,
-        'values': [test_perf_data[0].value],
-        'name': 'mysuite mytest opt e10s opt',
-        'parent_signature': None,
-        'job_ids': [test_perf_data[0].job_id],
-        'suite': test_perf_signature.suite,
-        'repository_name': test_perf_signature.repository.name,
-        'repository_id': test_perf_signature.repository.id,
-        'data': []
-    }]
+    expected = [
+        {
+            'signature_id': test_perf_signature.id,
+            'framework_id': test_perf_signature.framework_id,
+            'signature_hash': test_perf_signature.signature_hash,
+            'platform': test_perf_signature.platform.platform,
+            'test': test_perf_signature.test,
+            'application': test_perf_signature.application,
+            'lower_is_better': test_perf_signature.lower_is_better,
+            'has_subtests': test_perf_signature.has_subtests,
+            'tags': test_perf_signature.tags,
+            'measurement_unit': test_perf_signature.measurement_unit,
+            'values': [test_perf_data[0].value],
+            'name': 'mysuite mytest opt e10s opt',
+            'parent_signature': None,
+            'job_ids': [test_perf_data[0].job_id],
+            'suite': test_perf_signature.suite,
+            'repository_name': test_perf_signature.repository.name,
+            'repository_id': test_perf_signature.repository.id,
+            'data': [],
+        }
+    ]

     resp1 = client.get(reverse('performance-summary') + query_params1)
     assert resp1.status_code == 200
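Several of the tests above are fanned out over multiple cases with pytest.mark.parametrize; a self-contained sketch of that mechanism (names and values here are illustrative, not taken from the diff):

    import pytest

    @pytest.mark.parametrize(
        'interval, exp_len', [(86400, 1), (86400 * 3, 2)]
    )
    def test_interval_windows(interval, exp_len):
        # Each tuple becomes one generated test, with the values bound to
        # the matching argument names.
        assert interval % 86400 == 0
        assert exp_len >= 1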
@ -5,9 +5,7 @@ from django.urls import reverse
|
|||
|
||||
from tests.conftest import IS_WINDOWS
|
||||
from treeherder.etl.push import store_push_data
|
||||
from treeherder.model.models import (FailureClassification,
|
||||
JobNote,
|
||||
Push)
|
||||
from treeherder.model.models import FailureClassification, JobNote, Push
|
||||
from treeherder.webapp.api import utils
|
||||
|
||||
|
||||
|
@ -16,8 +14,7 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
|
|||
test retrieving a list of ten json blobs from the jobs-list
|
||||
endpoint.
|
||||
"""
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": test_repository.name}))
|
||||
resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
|
||||
data = resp.json()
|
||||
results = data['results']
|
||||
meta = data['meta']
|
||||
|
@ -26,32 +23,28 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
|
|||
assert isinstance(results, list)
|
||||
|
||||
assert len(results) == 10
|
||||
exp_keys = set([
|
||||
u'id',
|
||||
u'repository_id',
|
||||
u'author',
|
||||
u'revision',
|
||||
u'revisions',
|
||||
u'revision_count',
|
||||
u'push_timestamp',
|
||||
])
|
||||
exp_keys = set(
|
||||
[
|
||||
u'id',
|
||||
u'repository_id',
|
||||
u'author',
|
||||
u'revision',
|
||||
u'revisions',
|
||||
u'revision_count',
|
||||
u'push_timestamp',
|
||||
]
|
||||
)
|
||||
for rs in results:
|
||||
assert set(rs.keys()) == exp_keys
|
||||
|
||||
assert(meta == {
|
||||
u'count': 10,
|
||||
u'filter_params': {},
|
||||
u'repository': test_repository.name
|
||||
})
|
||||
assert meta == {u'count': 10, u'filter_params': {}, u'repository': test_repository.name}
|
||||
|
||||
|
||||
def test_push_list_bad_project(client, transactional_db):
|
||||
"""
|
||||
test that we have a sane error when the repository does not exist
|
||||
"""
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": "foo"}),
|
||||
)
|
||||
resp = client.get(reverse("push-list", kwargs={"project": "foo"}),)
|
||||
assert resp.status_code == 404
|
||||
assert resp.json() == {"detail": "No project with name foo"}
|
||||
|
||||
|
@ -63,9 +56,7 @@ def test_push_list_empty_push_still_show(client, sample_push, test_repository):
|
|||
"""
|
||||
store_push_data(test_repository, sample_push)
|
||||
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": test_repository.name}),
|
||||
)
|
||||
resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}),)
|
||||
assert resp.status_code == 200
|
||||
data = resp.json()
|
||||
assert len(data['results']) == 10
|
||||
|
@ -77,22 +68,19 @@ def test_push_list_single_short_revision(client, eleven_jobs_stored, test_reposi
|
|||
"""
|
||||
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": test_repository.name}),
|
||||
{"revision": "45f8637cb9f7"}
|
||||
reverse("push-list", kwargs={"project": test_repository.name}), {"revision": "45f8637cb9f7"}
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
results = resp.json()['results']
|
||||
meta = resp.json()['meta']
|
||||
assert len(results) == 1
|
||||
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
|
||||
assert(meta == {
|
||||
assert meta == {
|
||||
u'count': 1,
|
||||
u'revision': u'45f8637cb9f7',
|
||||
u'filter_params': {
|
||||
u'revisions_short_revision': "45f8637cb9f7"
|
||||
},
|
||||
u'repository': test_repository.name}
|
||||
)
|
||||
u'filter_params': {u'revisions_short_revision': "45f8637cb9f7"},
|
||||
u'repository': test_repository.name,
|
||||
}
|
||||
|
||||
|
||||
def test_push_list_single_long_revision(client, eleven_jobs_stored, test_repository):
|
||||
|
@ -102,21 +90,19 @@ def test_push_list_single_long_revision(client, eleven_jobs_stored, test_reposit
|
|||
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": test_repository.name}),
|
||||
{"revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"}
|
||||
{"revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
results = resp.json()['results']
|
||||
meta = resp.json()['meta']
|
||||
assert len(results) == 1
|
||||
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
|
||||
assert(meta == {
|
||||
assert meta == {
|
||||
u'count': 1,
|
||||
u'revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf',
|
||||
u'filter_params': {
|
||||
u'revisions_long_revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf'
|
||||
},
|
||||
u'repository': test_repository.name}
|
||||
)
|
||||
u'filter_params': {u'revisions_long_revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf'},
|
||||
u'repository': test_repository.name,
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.skipif(IS_WINDOWS, reason="timezone mixup happening somewhere")
|
||||
|
@ -127,7 +113,7 @@ def test_push_list_filter_by_revision(client, eleven_jobs_stored, test_repositor
|
|||
|
||||
resp = client.get(
|
||||
reverse("push-list", kwargs={"project": test_repository.name}),
|
||||
{"fromchange": "130965d3df6c", "tochange": "f361dcb60bbe"}
|
||||
{"fromchange": "130965d3df6c", "tochange": "f361dcb60bbe"},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
data = resp.json()
|
||||
|
@@ -138,38 +124,32 @@ def test_push_list_filter_by_revision(client, eleven_jobs_stored, test_repositor
         u'130965d3df6c9a1093b4725f3b877eaef80d72bc',
         u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
         u'a69390334818373e2d7e6e9c8d626a328ed37d47',
-        u'f361dcb60bbedaa01257fbca211452972f7a74b2'
+        u'f361dcb60bbedaa01257fbca211452972f7a74b2',
     }
-    assert(meta == {
+    assert meta == {
         u'count': 4,
         u'fromchange': u'130965d3df6c',
-        u'filter_params': {
-            u'push_timestamp__gte': 1384363842,
-            u'push_timestamp__lte': 1384365942
-        },
+        u'filter_params': {u'push_timestamp__gte': 1384363842, u'push_timestamp__lte': 1384365942},
         u'repository': test_repository.name,
-        u'tochange': u'f361dcb60bbe'}
-    )
+        u'tochange': u'f361dcb60bbe',
+    }
 
 
 @pytest.mark.skipif(IS_WINDOWS, reason="timezone mixup happening somewhere")
-def test_push_list_filter_by_date(client,
-                                  test_repository,
-                                  sample_push):
+def test_push_list_filter_by_date(client, test_repository, sample_push):
     """
     test retrieving a push list, filtered by a date range
     """
-    for (i, datestr) in zip([3, 4, 5, 6, 7], ["2013-08-09", "2013-08-10",
-                                              "2013-08-11", "2013-08-12",
-                                              "2013-08-13"]):
-        sample_push[i]['push_timestamp'] = utils.to_timestamp(
-            utils.to_datetime(datestr))
+    for (i, datestr) in zip(
+        [3, 4, 5, 6, 7], ["2013-08-09", "2013-08-10", "2013-08-11", "2013-08-12", "2013-08-13"]
+    ):
+        sample_push[i]['push_timestamp'] = utils.to_timestamp(utils.to_datetime(datestr))
 
     store_push_data(test_repository, sample_push)
 
     resp = client.get(
         reverse("push-list", kwargs={"project": test_repository.name}),
-        {"startdate": "2013-08-10", "enddate": "2013-08-13"}
+        {"startdate": "2013-08-10", "enddate": "2013-08-13"},
     )
     assert resp.status_code == 200
     data = resp.json()
@@ -180,44 +160,47 @@ def test_push_list_filter_by_date(client,
         u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
         u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
         u'a69390334818373e2d7e6e9c8d626a328ed37d47',
-        u'f361dcb60bbedaa01257fbca211452972f7a74b2'
+        u'f361dcb60bbedaa01257fbca211452972f7a74b2',
     }
-    assert(meta == {
+    assert meta == {
         u'count': 4,
         u'enddate': u'2013-08-13',
         u'filter_params': {
             u'push_timestamp__gte': 1376092800.0,
-            u'push_timestamp__lt': 1376438400.0
+            u'push_timestamp__lt': 1376438400.0,
         },
         u'repository': test_repository.name,
-        u'startdate': u'2013-08-10'}
-    )
+        u'startdate': u'2013-08-10',
+    }
 
 
-@pytest.mark.parametrize('filter_param, exp_ids', [
-    ('id__lt=2', [1]),
-    ('id__lte=2', [1, 2]),
-    ('id=2', [2]),
-    ('id__gt=2', [3]),
-    ('id__gte=2', [2, 3])
-])
-def test_push_list_filter_by_id(client,
-                                test_repository,
-                                filter_param,
-                                exp_ids):
+@pytest.mark.parametrize(
+    'filter_param, exp_ids',
+    [
+        ('id__lt=2', [1]),
+        ('id__lte=2', [1, 2]),
+        ('id=2', [2]),
+        ('id__gt=2', [3]),
+        ('id__gte=2', [2, 3]),
+    ],
+)
+def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
     """
     test filtering by id in various ways
     """
-    for (revision, author) in [('1234abcd', 'foo@bar.com'),
-                               ('2234abcd', 'foo2@bar.com'),
-                               ('3234abcd', 'foo3@bar.com')]:
-        Push.objects.create(repository=test_repository,
-                            revision=revision,
-                            author=author,
-                            time=datetime.datetime.now())
+    for (revision, author) in [
+        ('1234abcd', 'foo@bar.com'),
+        ('2234abcd', 'foo2@bar.com'),
+        ('3234abcd', 'foo3@bar.com'),
+    ]:
+        Push.objects.create(
+            repository=test_repository,
+            revision=revision,
+            author=author,
+            time=datetime.datetime.now(),
+        )
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) +
-        '?' + filter_param
+        reverse("push-list", kwargs={"project": test_repository.name}) + '?' + filter_param
     )
     assert resp.status_code == 200
     results = resp.json()['results']
@@ -228,16 +211,19 @@ def test_push_list_id_in(client, test_repository):
     """
     test the id__in parameter
     """
-    for (revision, author) in [('1234abcd', 'foo@bar.com'),
-                               ('2234abcd', 'foo2@bar.com'),
-                               ('3234abcd', 'foo3@bar.com')]:
-        Push.objects.create(repository=test_repository,
-                            revision=revision,
-                            author=author,
-                            time=datetime.datetime.now())
+    for (revision, author) in [
+        ('1234abcd', 'foo@bar.com'),
+        ('2234abcd', 'foo2@bar.com'),
+        ('3234abcd', 'foo3@bar.com'),
+    ]:
+        Push.objects.create(
+            repository=test_repository,
+            revision=revision,
+            author=author,
+            time=datetime.datetime.now(),
+        )
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) +
-        '?id__in=1,2'
+        reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2'
     )
     assert resp.status_code == 200
 
@@ -247,8 +233,7 @@ def test_push_list_id_in(client, test_repository):
 
     # test that we do something sane if invalid list passed in
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) +
-        '?id__in=1,2,foobar',
+        reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2,foobar',
     )
     assert resp.status_code == 400
 
@@ -260,8 +245,7 @@ def test_push_list_bad_count(client, test_repository):
     bad_count = "ZAP%n%s%n%s"
 
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}),
-        data={'count': bad_count}
+        reverse("push-list", kwargs={"project": test_repository.name}), data={'count': bad_count}
     )
 
     assert resp.status_code == 400
@@ -272,17 +256,20 @@ def test_push_author(client, test_repository):
     """
    test the author parameter
     """
-    for (revision, author) in [('1234abcd', 'foo@bar.com'),
-                               ('2234abcd', 'foo@bar.com'),
-                               ('3234abcd', 'foo2@bar.com')]:
-        Push.objects.create(repository=test_repository,
-                            revision=revision,
-                            author=author,
-                            time=datetime.datetime.now())
+    for (revision, author) in [
+        ('1234abcd', 'foo@bar.com'),
+        ('2234abcd', 'foo@bar.com'),
+        ('3234abcd', 'foo2@bar.com'),
+    ]:
+        Push.objects.create(
+            repository=test_repository,
+            revision=revision,
+            author=author,
+            time=datetime.datetime.now(),
+        )
 
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) +
-        '?author=foo@bar.com'
+        reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo@bar.com'
     )
     assert resp.status_code == 200
 
@@ -291,8 +278,7 @@ def test_push_author(client, test_repository):
     assert set([result['id'] for result in results]) == set([1, 2])
 
     resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name}) +
-        '?author=foo2@bar.com'
+        reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo2@bar.com'
     )
     assert resp.status_code == 200
 
@@ -301,17 +287,13 @@ def test_push_author(client, test_repository):
     assert results[0]['id'] == 3
 
 
-def test_push_list_without_jobs(client,
-                                test_repository,
-                                sample_push):
+def test_push_list_without_jobs(client, test_repository, sample_push):
     """
     test retrieving a push list without jobs
     """
     store_push_data(test_repository, sample_push)
 
-    resp = client.get(
-        reverse("push-list", kwargs={"project": test_repository.name})
-    )
+    resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
     assert resp.status_code == 200
     data = resp.json()
     results = data['results']
@@ -323,7 +305,7 @@ def test_push_list_without_jobs(client,
     assert meta == {
         u'count': len(results),
         u'filter_params': {},
-        u'repository': test_repository.name
+        u'repository': test_repository.name,
     }
 
 
@@ -335,10 +317,7 @@ def test_push_detail(client, eleven_jobs_stored, test_repository):
 
     push = Push.objects.get(id=1)
 
-    resp = client.get(
-        reverse("push-detail",
-                kwargs={"project": test_repository.name, "pk": 1})
-    )
+    resp = client.get(reverse("push-detail", kwargs={"project": test_repository.name, "pk": 1}))
     assert resp.status_code == 200
     assert isinstance(resp.json(), dict)
     assert resp.json()["id"] == push.id
@@ -350,8 +329,7 @@ def test_push_detail_not_found(client, test_repository):
     endpoint.
     """
     resp = client.get(
-        reverse("push-detail",
-                kwargs={"project": test_repository.name, "pk": -32767}),
+        reverse("push-detail", kwargs={"project": test_repository.name, "pk": -32767}),
     )
     assert resp.status_code == 404
 
@@ -362,10 +340,7 @@ def test_push_detail_bad_project(client, test_repository):
     endpoint.
     """
     bad_pk = -32767
-    resp = client.get(
-        reverse("push-detail",
-                kwargs={"project": "foo", "pk": bad_pk}),
-    )
+    resp = client.get(reverse("push-detail", kwargs={"project": "foo", "pk": bad_pk}),)
     assert resp.status_code == 404
 
 
@@ -373,27 +348,26 @@ def test_push_status(client, test_job, test_user):
     """
     test retrieving the status of a push
     """
-    failure_classification = FailureClassification.objects.get(
-        name="fixed by commit")
+    failure_classification = FailureClassification.objects.get(name="fixed by commit")
 
     push = test_job.push
 
     resp = client.get(
-        reverse("push-status",
-                kwargs={"project": push.repository.name, "pk": push.id})
+        reverse("push-status", kwargs={"project": push.repository.name, "pk": push.id})
     )
     assert resp.status_code == 200
     assert isinstance(resp.json(), dict)
     assert resp.json() == {'success': 1, 'completed': 1, 'pending': 0, 'running': 0}
 
-    JobNote.objects.create(job=test_job,
-                           failure_classification=failure_classification,
-                           user=test_user,
-                           text='A random note')
+    JobNote.objects.create(
+        job=test_job,
+        failure_classification=failure_classification,
+        user=test_user,
+        text='A random note',
+    )
 
     resp = client.get(
-        reverse("push-status",
-                kwargs={"project": push.repository.name, "pk": push.id})
+        reverse("push-status", kwargs={"project": push.repository.name, "pk": push.id})
     )
     assert resp.status_code == 200
     assert isinstance(resp.json(), dict)
@@ -1,16 +1,16 @@
 from django.urls import reverse
 
-from tests.autoclassify.utils import (create_failure_lines,
-                                      create_text_log_errors,
-                                      test_line)
-from treeherder.model.models import (BugJobMap,
-                                     Bugscache,
-                                     ClassifiedFailure,
-                                     FailureLine,
-                                     Job,
-                                     JobNote,
-                                     TextLogError,
-                                     TextLogErrorMetadata)
+from tests.autoclassify.utils import create_failure_lines, create_text_log_errors, test_line
+from treeherder.model.models import (
+    BugJobMap,
+    Bugscache,
+    ClassifiedFailure,
+    FailureLine,
+    Job,
+    JobNote,
+    TextLogError,
+    TextLogErrorMetadata,
+)
 
 
 def test_get_error(client, text_log_errors_failure_lines):
@@ -19,29 +19,32 @@ def test_get_error(client, text_log_errors_failure_lines):
     """
     text_log_errors, _ = text_log_errors_failure_lines
 
-    resp = client.get(
-        reverse("text-log-error-detail", kwargs={"pk": text_log_errors[0].id}))
+    resp = client.get(reverse("text-log-error-detail", kwargs={"pk": text_log_errors[0].id}))
 
     assert resp.status_code == 200
 
     data = resp.json()
 
     assert isinstance(data, object)
-    exp_error_keys = ["id", "line", "line_number", "matches",
-                      "classified_failures", "bug_suggestions", "metadata"]
+    exp_error_keys = [
+        "id",
+        "line",
+        "line_number",
+        "matches",
+        "classified_failures",
+        "bug_suggestions",
+        "metadata",
+    ]
 
     assert set(data.keys()) == set(exp_error_keys)
 
-    exp_meta_keys = ["text_log_error", "failure_line", "best_classification",
-                     "best_is_verified"]
+    exp_meta_keys = ["text_log_error", "failure_line", "best_classification", "best_is_verified"]
     assert set(data["metadata"].keys()) == set(exp_meta_keys)
 
 
-def test_update_error_verify(client,
-                             test_repository,
-                             text_log_errors_failure_lines,
-                             classified_failures,
-                             test_user):
+def test_update_error_verify(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
 
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
@@ -52,9 +55,7 @@ def test_update_error_verify(client,
 
     body = {"best_classification": classified_failures[0].id}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -64,11 +65,9 @@ def test_update_error_verify(client,
     assert error_line.metadata.best_is_verified
 
 
-def test_update_error_replace(client,
-                              test_repository,
-                              text_log_errors_failure_lines,
-                              classified_failures,
-                              test_user):
+def test_update_error_replace(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     client.force_authenticate(user=test_user)
 
     text_log_errors, _ = text_log_errors_failure_lines
@@ -78,9 +77,7 @@ def test_update_error_replace(client,
 
     body = {"best_classification": classified_failures[1].id}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -93,22 +90,22 @@ def test_update_error_replace(client,
     assert error_line.metadata.best_is_verified
 
     expected_matcher = "ManualDetector"
-    assert error_line.matches.get(classified_failure=classified_failure).matcher_name == expected_matcher
+    assert (
+        error_line.matches.get(classified_failure=classified_failure).matcher_name
+        == expected_matcher
+    )
 
 
-def test_update_error_mark_job(client,
-                               test_job,
-                               text_log_errors_failure_lines,
-                               classified_failures,
-                               test_user):
+def test_update_error_mark_job(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
 
     client.force_authenticate(user=test_user)
 
-    bug = Bugscache.objects.create(id=1234,
-                                   status="NEW",
-                                   modified="2014-01-01 00:00:00",
-                                   summary="test")
+    bug = Bugscache.objects.create(
+        id=1234, status="NEW", modified="2014-01-01 00:00:00", summary="test"
+    )
     classified_failures[1].bug_number = bug.id
     classified_failures[1].save()
 
@@ -117,8 +114,7 @@ def test_update_error_mark_job(client,
 
     body = {"best_classification": classified_failures[1].id}
 
-    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}),
-                      body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}), body)
 
     assert resp.status_code == 200
 
@@ -138,25 +134,20 @@ def test_update_error_mark_job(client,
     assert job_bugs[0].bug_id == bug.id
 
 
-def test_update_error_mark_job_with_human_note(client,
-                                               test_job,
-                                               text_log_errors_failure_lines,
-                                               classified_failures, test_user):
+def test_update_error_mark_job_with_human_note(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
 
     client.force_authenticate(user=test_user)
 
-    JobNote.objects.create(job=test_job,
-                           failure_classification_id=4,
-                           user=test_user,
-                           text="note")
+    JobNote.objects.create(job=test_job, failure_classification_id=4, user=test_user, text="note")
 
     for error_line in text_log_errors:
 
         body = {"best_classification": classified_failures[1].id}
 
-        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-                          body)
+        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
         assert resp.status_code == 200
 
@@ -168,25 +159,20 @@ def test_update_error_mark_job_with_human_note(client,
     assert note.user == test_user
 
 
-def test_update_error_line_mark_job_with_auto_note(client,
-                                                   test_job,
-                                                   text_log_errors_failure_lines,
-                                                   classified_failures,
-                                                   test_user):
+def test_update_error_line_mark_job_with_auto_note(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
 
     text_log_errors, _ = text_log_errors_failure_lines
 
     client.force_authenticate(user=test_user)
 
-    JobNote.objects.create(job=test_job,
-                           failure_classification_id=7,
-                           text="note")
+    JobNote.objects.create(job=test_job, failure_classification_id=7, text="note")
 
     for text_log_error in text_log_errors:
         body = {"best_classification": classified_failures[1].id}
 
-        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}),
-                          body)
+        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}), body)
 
         assert resp.status_code == 200
 
@@ -204,38 +190,38 @@ def test_update_error_line_mark_job_with_auto_note(client,
     assert notes[1].text == "note"
 
 
-def test_update_errors(client,
-                       test_repository,
-                       text_log_errors_failure_lines,
-                       classified_failures,
-                       eleven_jobs_stored,
-                       test_user):
+def test_update_errors(
+    client,
+    test_repository,
+    text_log_errors_failure_lines,
+    classified_failures,
+    eleven_jobs_stored,
+    test_user,
+):
 
     jobs = (Job.objects.get(id=1), Job.objects.get(id=2))
 
     client.force_authenticate(user=test_user)
 
-    lines = [(test_line, {}),
-             (test_line, {"subtest": "subtest2"})]
+    lines = [(test_line, {}), (test_line, {"subtest": "subtest2"})]
     new_failure_lines = create_failure_lines(jobs[1], lines)
     new_text_log_errors = create_text_log_errors(jobs[1], lines)
 
-    for text_log_error, failure_line in zip(new_text_log_errors,
-                                            new_failure_lines):
-        TextLogErrorMetadata.objects.create(text_log_error=text_log_error,
-                                            failure_line=failure_line)
+    for text_log_error, failure_line in zip(new_text_log_errors, new_failure_lines):
+        TextLogErrorMetadata.objects.create(
+            text_log_error=text_log_error, failure_line=failure_line
+        )
 
-    failure_lines = FailureLine.objects.filter(
-        job_guid__in=[job.guid for job in jobs]).all()
-    text_log_errors = TextLogError.objects.filter(
-        step__job__in=jobs).all()
+    failure_lines = FailureLine.objects.filter(job_guid__in=[job.guid for job in jobs]).all()
+    text_log_errors = TextLogError.objects.filter(step__job__in=jobs).all()
 
     for text_log_error in text_log_errors:
         assert text_log_error.metadata.best_is_verified is False
 
-    body = [{"id": failure_line.id,
-             "best_classification": classified_failures[1].id}
-            for failure_line in failure_lines]
+    body = [
+        {"id": failure_line.id, "best_classification": classified_failures[1].id}
+        for failure_line in failure_lines
+    ]
     resp = client.put(reverse("text-log-error-list"), body)
 
     assert resp.status_code == 200
@@ -254,8 +240,9 @@ def test_update_errors(client,
     assert note.user == test_user
 
 
-def test_update_error_ignore(client, test_job, text_log_errors_failure_lines,
-                             classified_failures, test_user):
+def test_update_error_ignore(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
 
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
@@ -264,12 +251,9 @@ def test_update_error_ignore(client, test_job, text_log_errors_failure_lines,
     assert text_log_error.metadata.best_classification == classified_failures[0]
     assert text_log_error.metadata.best_is_verified is False
 
-    body = {"project": test_job.repository.name,
-            "best_classification": None}
+    body = {"project": test_job.repository.name, "best_classification": None}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": text_log_error.id}), body)
 
     assert resp.status_code == 200
 
@@ -279,11 +263,9 @@ def test_update_error_ignore(client, test_job, text_log_errors_failure_lines,
     assert text_log_error.metadata.best_is_verified
 
 
-def test_update_error_all_ignore_mark_job(client,
-                                          test_job,
-                                          text_log_errors_failure_lines,
-                                          classified_failures,
-                                          test_user):
+def test_update_error_all_ignore_mark_job(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
 
     text_log_errors, _ = text_log_errors_failure_lines
 
@@ -302,8 +284,7 @@ def test_update_error_all_ignore_mark_job(client,
 
         body = {"best_classification": None}
 
-        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-                          body)
+        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
         assert resp.status_code == 200
 
@@ -317,11 +298,9 @@ def test_update_error_all_ignore_mark_job(client,
     assert JobNote.objects.count() == 1
 
 
-def test_update_error_partial_ignore_mark_job(client,
-                                              test_job,
-                                              text_log_errors_failure_lines,
-                                              classified_failures,
-                                              test_user):
+def test_update_error_partial_ignore_mark_job(
+    client, test_job, text_log_errors_failure_lines, classified_failures, test_user
+):
 
     text_log_errors, _ = text_log_errors_failure_lines
 
@@ -332,8 +311,7 @@ def test_update_error_partial_ignore_mark_job(client,
 
         body = {"best_classification": None if i == 0 else classified_failures[0].id}
 
-        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-                          body)
+        resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
         assert resp.status_code == 200
 
@@ -354,11 +332,9 @@ def test_update_error_partial_ignore_mark_job(client,
     assert note.user == test_user
 
 
-def test_update_error_verify_bug(client,
-                                 test_repository,
-                                 text_log_errors_failure_lines,
-                                 classified_failures,
-                                 test_user):
+def test_update_error_verify_bug(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
 
@@ -371,9 +347,7 @@ def test_update_error_verify_bug(client,
 
     body = {"bug_number": classified_failures[0].bug_number}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -383,11 +357,9 @@ def test_update_error_verify_bug(client,
     assert error_line.metadata.best_is_verified
 
 
-def test_update_error_verify_new_bug(client,
-                                     test_repository,
-                                     text_log_errors_failure_lines,
-                                     classified_failures,
-                                     test_user):
+def test_update_error_verify_new_bug(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
 
@@ -398,9 +370,7 @@ def test_update_error_verify_new_bug(client,
     assert 78910 not in [item.bug_number for item in classified_failures]
     body = {"bug_number": 78910}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -411,11 +381,9 @@ def test_update_error_verify_new_bug(client,
     assert error_line.metadata.best_classification.bug_number == 78910
 
 
-def test_update_error_verify_ignore_now(client,
-                                        test_repository,
-                                        text_log_errors_failure_lines,
-                                        classified_failures,
-                                        test_user):
+def test_update_error_verify_ignore_now(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
 
@@ -426,9 +394,7 @@ def test_update_error_verify_ignore_now(client,
     assert 78910 not in [item.bug_number for item in classified_failures]
     body = {}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -438,11 +404,9 @@ def test_update_error_verify_ignore_now(client,
     assert error_line.metadata.best_is_verified
 
 
-def test_update_error_change_bug(client,
-                                 test_repository,
-                                 text_log_errors_failure_lines,
-                                 classified_failures,
-                                 test_user):
+def test_update_error_change_bug(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
 
@@ -455,9 +419,7 @@ def test_update_error_change_bug(client,
     assert 78910 not in [item.bug_number for item in classified_failures]
     body = {"best_classification": classified_failure.id, "bug_number": 78910}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -469,11 +431,9 @@ def test_update_error_change_bug(client,
     assert error_line.metadata.best_classification.bug_number == 78910
 
 
-def test_update_error_bug_change_cf(client,
-                                    test_repository,
-                                    text_log_errors_failure_lines,
-                                    classified_failures,
-                                    test_user):
+def test_update_error_bug_change_cf(
+    client, test_repository, text_log_errors_failure_lines, classified_failures, test_user
+):
     text_log_errors, _ = text_log_errors_failure_lines
     client.force_authenticate(user=test_user)
 
@@ -485,12 +445,9 @@ def test_update_error_bug_change_cf(client,
     classified_failures[1].bug_number = 78910
     classified_failures[1].save()
 
-    body = {"best_classification": classified_failures[0].id,
-            "bug_number": 78910}
+    body = {"best_classification": classified_failures[0].id, "bug_number": 78910}
 
-    resp = client.put(
-        reverse("text-log-error-detail", kwargs={"pk": error_line.id}),
-        body)
+    resp = client.put(reverse("text-log-error-detail", kwargs={"pk": error_line.id}), body)
 
     assert resp.status_code == 200
 
@@ -26,8 +26,7 @@ def test_unsupported_version():
 def test_correct_version():
     view = RequestVersionView.as_view()
     version = settings.REST_FRAMEWORK['ALLOWED_VERSIONS'][0]
-    request = factory.get('/endpoint/',
-                          HTTP_ACCEPT='application/json; version={0}'.format(version))
+    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version={0}'.format(version))
    response = view(request)
     assert response.data == {'version': version}
 
@@ -7,8 +7,7 @@ from django.core.exceptions import ObjectDoesNotExist
 from jose import jwt
 from rest_framework.exceptions import AuthenticationFailed
 
-from treeherder.config.settings import (AUTH0_CLIENTID,
-                                        AUTH0_DOMAIN)
+from treeherder.config.settings import AUTH0_CLIENTID, AUTH0_DOMAIN
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +28,6 @@ with open('treeherder/auth/jwks.json') as f:
 
 
 class AuthBackend:
-
     def _get_access_token_expiry(self, request):
         expiration_timestamp_in_seconds = request.META.get('HTTP_ACCESS_TOKEN_EXPIRES_AT')
 
@@ -141,7 +139,7 @@ class AuthBackend:
                     "kid": key["kid"],
                     "use": key["use"],
                     "n": key["n"],
-                    "e": key["e"]
+                    "e": key["e"],
                 }
                 break
 
@@ -156,7 +154,7 @@ class AuthBackend:
                 algorithms=['RS256'],
                 audience=AUTH0_CLIENTID,
                 access_token=access_token,
-                issuer="https://"+AUTH0_DOMAIN+"/"
+                issuer="https://" + AUTH0_DOMAIN + "/",
             )
             return user_info
         except jwt.ExpiredSignatureError:
@@ -173,7 +171,9 @@ class AuthBackend:
         now_in_seconds = int(time.time())
 
         # The session length is set to match whichever token expiration time is closer.
-        earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp)
+        earliest_expiration_timestamp = min(
+            access_token_expiry_timestamp, id_token_expiry_timestamp
+        )
         seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
 
         if seconds_until_expiry <= 0:
Some files were not shown because too many files changed in this diff.