Bug 858756 (1/2): Add basic tests for analyze_talos.py [r=catlee]

Matt Brubeck 2013-04-17 12:09:18 -07:00
Parent ecafee4d45
Commit b2989cb8e6
2 changed files with 96 additions and 14 deletions

View file

@@ -253,9 +253,6 @@ class AnalysisRunner:
         log.basicConfig(level=options.verbosity, format="%(asctime)s %(message)s")
 
-        self.pushlog = PushLog(config.get('cache', 'pushlog'), config.get('main', 'base_hg_url'))
-        self.pushlog.load()
-
         self.loadWarningHistory()
 
         self.dashboard_data = {}
@@ -269,10 +266,24 @@ class AnalysisRunner:
         # The id of the last test run we've looked at
         self.last_run = 0
+        self._source = None
+        self._pushlog = None
 
-        import analyze_db as source
-        source.connect(config.get('main', 'dburl'))
-        self.source = source
+    @property
+    def pushlog(self):
+        if not self._pushlog:
+            self._pushlog = PushLog(self.config.get('cache', 'pushlog'), self.config.get('main', 'base_hg_url'))
+            self._pushlog.load()
+        return self._pushlog
+
+    @property
+    def source(self):
+        if not self._source:
+            import analyze_db as source
+            source.connect(self.config.get('main', 'dburl'))
+            self._source = source
+        return self._source
 
     def loadWarningHistory(self):
         # Stop warning about stuff from a long time ago
@@ -666,7 +677,7 @@ class AnalysisRunner:
         basename = "%s/%s-%s-%s" % (graph_dir,
                 series.branch_name, series.os_name, test_name)
 
-        for s, d, state, skip, last_good in series_data:
+        for d, state, skip, last_good in series_data:
             graph_point = (d.time * 1000, d.value)
             all_data.append(graph_point)
             if state == "good":
@@ -816,6 +827,14 @@ class AnalysisRunner:
            self.warning_history[s.branch_name][s.os_name][s.test_name] = []
        warnings = self.warning_history[s.branch_name][s.os_name][s.test_name]
 
+        series_data = self.processSeries(analysis_gen, warnings)
+        for d, state, skip, last_good in series_data:
+            self.handleData(s, d, state, skip, last_good)
+
+        if self.config.has_option('main', 'graph_dir'):
+            self.outputGraphs(s, series_data)
+
+    def processSeries(self, analysis_gen, warnings):
        last_good = None
        last_err = None
        last_err_good = None
@@ -856,11 +875,10 @@ class AnalysisRunner:
                    last_err = None
                last_good = d
 
-            series_data.append((s, d, state, skip, last_good))
-            self.handleData(s, d, state, skip, last_good)
+            series_data.append((d, state, skip, last_good))
 
-        if self.config.has_option('main', 'graph_dir'):
-            self.outputGraphs(s, series_data)
+        return series_data
 
    def loadSeries(self):
        start_time = self.options.start_time
@@ -923,9 +941,8 @@
        except:
            log.exception("Error saving last time")
 
-if __name__ == "__main__":
+def parse_options(args=None):
    from optparse import OptionParser
-    from ConfigParser import RawConfigParser
 
    parser = OptionParser()
    parser.add_option("-b", "--branch", dest="branches", action="append")
@@ -952,7 +969,10 @@ if __name__ == "__main__":
            catchup = False,
            )
 
-    options, args = parser.parse_args()
+    return parser.parse_args(args)
+
+def get_config(options):
+    from ConfigParser import RawConfigParser
 
    config = RawConfigParser()
    config.add_section('main')
@@ -968,6 +988,12 @@ if __name__ == "__main__":
    if options.machine_addresses:
        config.set('main', 'machine_emails', ",".join(options.machine_addresses))
 
+    return config
+
+if __name__ == "__main__":
+    options, args = parse_options()
+    config = get_config(options)
+
    vars = os.environ.copy()
    vars['sys_prefix'] = sys.prefix
    vars['here'] = os.path.dirname(__file__)
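
Taken together, the changes above pull option parsing and config construction out of the __main__ block and make the pushlog and database connections lazy, so an AnalysisRunner can be constructed and processSeries exercised without touching a database. A minimal sketch of using the extracted helpers directly; the flag values here are illustrative only, not taken from the patch:

# Hypothetical standalone use of the helpers extracted in this patch; the
# flag values are examples only.
from analyze_talos import AnalysisRunner, get_config, parse_options

options, args = parse_options(['--start-time', '0', '--branch', 'mozilla-central'])
config = get_config(options)

# source and pushlog are now lazy properties, so constructing the runner
# does not connect to the database or load the pushlog.
runner = AnalysisRunner(options, config)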

View file

@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+from analyze import PerfDatum
+from analyze_talos import *
+from ConfigParser import RawConfigParser
+from time import time
+
+TEST_CONFIG = """
+base_hg_url = http://example.com
+"""
+
+class TestAnalysisRunner(unittest.TestCase):
+    def get_config(self):
+        options, args = parse_options(['--start-time', '0'])
+        config = get_config(options)
+        config.set('main', 'fore_window', '5')
+        config.set('main', 'back_window', '5')
+        config.set('main', 'threshold', '9')
+        config.set('main', 'percentage_threshold', '9')
+        config.set('main', 'machine_threshold', '9')
+        config.set('main', 'machine_history_size', '0')
+        return options, config
+
+    def get_data(self):
+        return [
+            (PerfDatum(0, 0, time() + 0, 0.0, 0, 0), 'good'),
+            (PerfDatum(1, 1, time() + 1, 0.0, 1, 1), 'good'),
+            (PerfDatum(2, 2, time() + 2, 0.0, 2, 2), 'good'),
+            (PerfDatum(3, 3, time() + 3, 0.0, 3, 3), 'good'),
+            (PerfDatum(4, 4, time() + 4, 1.0, 4, 4), 'regression'),
+            (PerfDatum(5, 5, time() + 5, 1.0, 5, 5), 'good'),
+            (PerfDatum(6, 6, time() + 6, 1.0, 6, 6), 'good'),
+            (PerfDatum(7, 7, time() + 7, 1.0, 7, 7), 'good'),
+        ]
+
+    def test_processSeries(self):
+        options, config = self.get_config()
+        runner = AnalysisRunner(options, config)
+
+        data = self.get_data()
+        results = runner.processSeries(data, [])
+        self.assertEqual(len(results), 8)
+
+        skipped = filter(lambda (d, state, skip, last_good): skip, results)
+        self.assertEqual(len(skipped), 0)
+
+        self.assertEqual(results[3], (data[3][0], 'good', False, data[3][0]))
+        self.assertEqual(results[4], (data[4][0], 'regression', False, data[3][0]))
+        self.assertEqual(results[5], (data[5][0], 'good', False, data[5][0]))
+
+if __name__ == '__main__':
+    unittest.main()
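
Since the new module ends in a standard unittest.main() guard it can be run directly with Python 2; it can also be driven through unittest's loader, as in this sketch. The module name test_analyze_talos is an assumption, since the file's path is not visible in this view:

# Sketch: load and run the new test case explicitly. The module name is
# assumed; the commit view above does not show the file's path.
import unittest
from test_analyze_talos import TestAnalysisRunner

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestAnalysisRunner)
unittest.TextTestRunner(verbosity=2).run(suite)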