Bug 1228154 - Generate new performance alerts as data is ingested

This commit is contained in:
William Lachance 2015-12-02 13:00:45 -05:00
Parent 45caf5de80
Commit a8e663d61d
6 changed files: 108 additions and 5 deletions

View file

@ -4,6 +4,6 @@ worker_pushlog: newrelic-admin run-program celery -A treeherder worker -Q pushlo
worker_buildapi_pending: newrelic-admin run-program celery -A treeherder worker -Q buildapi_pending --maxtasksperchild=20 --concurrency=5
worker_buildapi_running: newrelic-admin run-program celery -A treeherder worker -Q buildapi_running --maxtasksperchild=20 --concurrency=5
worker_buildapi_4hr: newrelic-admin run-program celery -A treeherder worker -Q buildapi_4hr --maxtasksperchild=20 --concurrency=1
worker_default: newrelic-admin run-program celery -A treeherder worker -Q default,cycle_data,calculate_durations,fetch_bugs,fetch_allthethings --maxtasksperchild=50 --concurrency=3
worker_default: newrelic-admin run-program celery -A treeherder worker -Q default,cycle_data,calculate_durations,fetch_bugs,fetch_allthethings,generate_perf_alerts --maxtasksperchild=50 --concurrency=3
worker_hp: newrelic-admin run-program celery -A treeherder worker -Q classification_mirroring,publish_to_pulse --maxtasksperchild=50 --concurrency=1
worker_log_parser: newrelic-admin run-program celery -A treeherder worker -Q log_parser_fail,log_parser,log_parser_hp,log_parser_json --maxtasksperchild=50 --concurrency=5

View file

@ -19,6 +19,6 @@ if [ ! -f $LOGFILE ]; then
fi
exec $NEWRELIC_ADMIN celery -A treeherder worker -c 3 \
-Q default,cycle_data,calculate_durations,fetch_bugs,autoclassify,detect_intermittents,fetch_allthethings \
-Q default,cycle_data,calculate_durations,fetch_bugs,autoclassify,detect_intermittents,fetch_allthethings,generate_perf_alerts \
-E --maxtasksperchild=500 \
--logfile=$LOGFILE -l INFO -n default.%h

View file

@ -11,7 +11,9 @@ from treeherder.model.models import (MachinePlatform,
Option,
OptionCollection,
Repository)
from treeherder.perf.models import (PerformanceDatum,
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
@ -57,6 +59,48 @@ def perf_reference_data():
}
def _generate_perf_data_range(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data):
    """Ingest 30 consecutive performance datums for a single subtest.

    The first 15 pushes carry a value of 1 and the last 15 a value of 2,
    producing a clean step change midway through the series -- the shape
    the alert generator is expected to detect.
    """
    framework_name = "cheezburger"
    PerformanceFramework.objects.create(name=framework_name)

    # push ids 0..29; value flips from 1 to 2 at index 15
    for push_id, subtest_value in enumerate([1] * 15 + [2] * 15):
        job_data = {
            'fake_job_guid': {
                'id': push_id,
                'result_set_id': push_id,
                'push_timestamp': push_id
            }
        }
        blob = {
            'framework': {'name': framework_name},
            'suites': [
                {
                    'name': 'cheezburger metrics',
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': subtest_value
                        }
                    ]
                }
            ]
        }
        # the perf data adapter expects unserialized performance data
        submit_datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': json.dumps({
                'performance_data': blob
            })
        }
        load_perf_artifacts(test_repository.name, perf_reference_data,
                            job_data, submit_datum)
def _verify_signature_datum(repo_name, framework_name, suitename,
testname, option_collection_hash, platform,
lower_is_better, value, push_timestamp):
@ -166,7 +210,7 @@ def test_load_talos_data(test_project, test_repository,
perf_option_collection, perf_platform,
perf_job_data, perf_reference_data):
PerformanceFramework.objects.get_or_create(name='talos')
PerformanceFramework.objects.create(name='talos')
talos_perf_data = SampleData.get_talos_perf_data()
for talos_datum in talos_perf_data:
@ -199,6 +243,7 @@ def test_load_talos_data(test_project, test_repository,
# verify that we have signatures for the subtests
for (testname, results) in talos_datum["results"].iteritems():
signature = PerformanceSignature.objects.get(test=testname)
datum = PerformanceDatum.objects.get(signature=signature)
if talos_datum.get('summary'):
# if we have a summary, ensure the subtest summary values made
@ -238,3 +283,39 @@ def test_load_talos_data(test_project, test_repository,
# delete perf objects for next iteration
PerformanceSignature.objects.all().delete()
PerformanceDatum.objects.all().delete()
def test_alert_generation(test_project, test_repository,
                          perf_option_collection, perf_platform,
                          perf_reference_data):
    """Ingesting a series with a step change should produce one alert."""
    _generate_perf_data_range(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data)

    # exactly one alert, grouped under exactly one summary
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1

    # the summary spans the push where the value changed (index 15)
    # and the push immediately before it
    summary = PerformanceAlertSummary.objects.get(id=1)
    assert summary.result_set_id == 15
    assert summary.prev_result_set_id == 14

    # the series doubled (1 -> 2): +1 absolute, +100 percent, a regression
    alert = PerformanceAlert.objects.get(id=1)
    assert alert.is_regression
    assert alert.amount_abs == 1
    assert alert.amount_pct == 100
def test_alert_generation_try(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data):
    """No alerts should be generated for data ingested on "try" repos."""
    # move the repository into the "try" group before ingesting anything
    test_repository.repository_group.name = "try"
    test_repository.repository_group.save()

    _generate_perf_data_range(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data)

    # the step change is still there, but try data must never alert
    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0

View file

@ -190,7 +190,8 @@ CELERY_QUEUES = (
Queue('cycle_data', Exchange('default'), routing_key='cycle_data'),
Queue('calculate_durations', Exchange('default'), routing_key='calculate_durations'),
Queue('fetch_bugs', Exchange('default'), routing_key='fetch_bugs'),
Queue('store_pulse_jobs', Exchange('default'), routing_key='store_pulse_jobs')
Queue('store_pulse_jobs', Exchange('default'), routing_key='store_pulse_jobs'),
Queue('generate_perf_alerts', Exchange('default'), routing_key='generate_perf_alerts'),
)
CELERY_ACCEPT_CONTENT = ['json']

View file

@ -13,6 +13,7 @@ from treeherder.model.models import (MachinePlatform,
from treeherder.perf.models import (PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.perf.tasks import generate_alerts
logger = logging.getLogger(__name__)
@ -88,6 +89,7 @@ def load_perf_artifacts(project_name, reference_data, job_data, datum):
platform=reference_data['machine_platform'])[0]
repository = Repository.objects.get(
name=project_name)
is_try_repository = repository.repository_group.name == 'try'
# data for performance series
job_guid = datum["job_guid"]
@ -129,6 +131,12 @@ def load_perf_artifacts(project_name, reference_data, job_data, datum):
push_timestamp=push_timestamp,
defaults={'value': subtest['value']})
# if there is no summary, we should schedule a generate alerts
# task for the subtest, since we have new data
if not is_try_repository and suite.get('value') is None:
generate_alerts.apply_async(args=[signature.id],
routing_key='generate_perf_alerts')
# if we have a summary value, create or get its signature and insert
# it too
if suite.get('value') is not None:
@ -161,6 +169,9 @@ def load_perf_artifacts(project_name, reference_data, job_data, datum):
signature=signature,
push_timestamp=push_timestamp,
defaults={'value': suite['value']})
if not is_try_repository:
generate_alerts.apply_async(args=[signature.id],
routing_key='generate_perf_alerts')
def _calculate_summary_value(results):

10
treeherder/perf/tasks.py Normal file
View file

@ -0,0 +1,10 @@
from celery import task
from treeherder.perf.alerts import generate_new_alerts_in_series
from treeherder.perf.models import PerformanceSignature
@task(name='generate-alerts')
def generate_alerts(signature_id):
    """Celery task: scan one performance series for new alerts.

    *signature_id* is the primary key of the PerformanceSignature whose
    datum series should be (re)examined for regressions/improvements.
    """
    series_signature = PerformanceSignature.objects.get(id=signature_id)
    generate_new_alerts_in_series(series_signature)