mirror of https://github.com/mozilla/gecko-dev.git
Bug 1399800 - integrate pytest and add initial tests r=jmaher
MozReview-Commit-ID: FNQwCEtM1MB --HG-- rename : testing/talos/tests/__init__.py => testing/talos/talos/unittests/__init__.py rename : testing/talos/tests/browser_output.ts.txt => testing/talos/talos/unittests/browser_output.ts.txt rename : testing/talos/tests/browser_output.tsvg.txt => testing/talos/talos/unittests/browser_output.tsvg.txt rename : testing/talos/tests/profile.tgz => testing/talos/talos/unittests/profile.tgz rename : testing/talos/tests/ps-Acj.out => testing/talos/talos/unittests/ps-Acj.out rename : testing/talos/tests/test_talosconfig_browser_config.json => testing/talos/talos/unittests/test_talosconfig_browser_config.json rename : testing/talos/tests/test_talosconfig_test_config.json => testing/talos/talos/unittests/test_talosconfig_test_config.json rename : testing/talos/tests/xrestop_output.txt => testing/talos/talos/unittests/xrestop_output.txt extra : rebase_source : 4048d8ef8f7b9352968115fb7125182b5e4e2907
This commit is contained in:
Parent
029ecbb951
Commit
8b45fa1364
|
@ -42,6 +42,7 @@ mozilla.pth:testing/marionette/client
|
|||
mozilla.pth:testing/marionette/harness
|
||||
mozilla.pth:testing/marionette/harness/marionette_harness/runner/mixins/browsermob-proxy-py
|
||||
mozilla.pth:testing/marionette/puppeteer/firefox
|
||||
mozilla.pth:testing/talos
|
||||
packages.txt:testing/mozbase/packages.txt
|
||||
mozilla.pth:tools
|
||||
mozilla.pth:testing/web-platform
|
||||
|
|
|
@ -85,6 +85,7 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] or not CONFIG['MOZ_BUILD_APP']:
|
|||
'layout/tools/reftest/selftest/python.ini',
|
||||
'testing/marionette/harness/marionette_harness/tests/harness_unit/python.ini',
|
||||
'testing/mochitest/tests/python/python.ini',
|
||||
'testing/talos/talos/unittests/python.ini'
|
||||
]
|
||||
|
||||
CONFIGURE_SUBST_FILES += [
|
||||
|
|
|
@ -18,6 +18,8 @@ Each filter is a simple function, but it also have attached a special
|
|||
# data is filtered
|
||||
"""
|
||||
|
||||
_FILTERS = {}
|
||||
|
||||
|
||||
class Filter(object):
|
||||
def __init__(self, func, *args, **kwargs):
|
||||
|
@ -45,9 +47,54 @@ def define_filter(func):
|
|||
func.prepare = prepare
|
||||
return func
|
||||
|
||||
|
||||
def register_filter(func):
    """Register *func* in the module-level ``_FILTERS`` registry.

    Intended to be used as a decorator on every filter defined in this
    module so that :func:`filters` can later look filters up by name.

    :param func: the filter function to register.
    :returns: *func* unchanged, so the decorator is transparent.
    """
    # No `global` statement needed: we only mutate the existing dict,
    # we never rebind the module-level name.
    _FILTERS[func.__name__] = func
    return func
|
||||
|
||||
|
||||
def filters(*args):
    """Resolve filter names to the registered filter functions.

    :param args: filter names, as registered via :func:`register_filter`.
    :returns: a list of filter callables in the same order as *args*.
    :raises KeyError: if a name was never registered.
    """
    # Read-only access to the module-level registry: no `global` needed.
    # Also avoid shadowing the `filter` builtin with the loop variable.
    return [_FILTERS[name] for name in args]
|
||||
|
||||
|
||||
def apply(data, filters):
    """Run *data* through each callable in *filters*, in order.

    The output of one filter becomes the input of the next; the result
    of the last filter is returned. With an empty *filters* list the
    data is returned unchanged.
    """
    result = data
    for func in filters:
        result = func(result)
    return result
|
||||
|
||||
|
||||
def parse(string_):
    """Parse a filter spec such as ``"mean"`` or ``"ignore_first:10,2.5"``.

    Returns ``[name, arguments]`` where *arguments* is a (possibly
    empty) list of numbers parsed from the comma-separated text after
    the first colon. Integers are preferred; anything that is not an
    int is parsed as a float.

    :raises ValueError: if an argument is not a valid number.
    """

    def to_number(text):
        # Prefer int ("10" -> 10); fall back to float ("10.5" -> 10.5).
        try:
            return int(text)
        except ValueError:
            return float(text)

    pieces = string_.split(":")
    name = pieces[0]
    args = []
    if len(pieces) > 1:
        args = [to_number(part) for part in pieces[1].split(",")]
    return [name, args]
|
||||
|
||||
|
||||
# filters that return a scalar
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def mean(series):
|
||||
"""
|
||||
|
@ -56,6 +103,7 @@ def mean(series):
|
|||
return sum(series)/float(len(series))
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def median(series):
|
||||
"""
|
||||
|
@ -71,6 +119,7 @@ def median(series):
|
|||
return 0.5*(series[middle-1] + series[middle])
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def variance(series):
|
||||
"""
|
||||
|
@ -82,6 +131,7 @@ def variance(series):
|
|||
return variance
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def stddev(series):
|
||||
"""
|
||||
|
@ -90,6 +140,7 @@ def stddev(series):
|
|||
return variance(series)**0.5
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def dromaeo(series):
|
||||
"""
|
||||
|
@ -106,12 +157,14 @@ def dromaeo(series):
|
|||
return geometric_mean(means)
|
||||
|
||||
|
||||
@register_filter
@define_filter
def dromaeo_chunks(series, size):
    """Yield successive *size*-item slices of *series*.

    The final chunk may be shorter when ``len(series)`` is not an
    exact multiple of *size*.
    """
    for start in range(0, len(series), size):
        yield series[start:start + size]
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def geometric_mean(series):
|
||||
"""
|
||||
|
@ -125,6 +178,7 @@ def geometric_mean(series):
|
|||
# filters that return a list
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def ignore_first(series, number=1):
|
||||
"""
|
||||
|
@ -136,6 +190,7 @@ def ignore_first(series, number=1):
|
|||
return series[number:]
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def ignore(series, function):
|
||||
"""
|
||||
|
@ -150,6 +205,7 @@ def ignore(series, function):
|
|||
return series
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def ignore_max(series):
|
||||
"""
|
||||
|
@ -158,6 +214,7 @@ def ignore_max(series):
|
|||
return ignore(series, max)
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def ignore_min(series):
|
||||
"""
|
||||
|
@ -166,6 +223,7 @@ def ignore_min(series):
|
|||
return ignore(series, min)
|
||||
|
||||
|
||||
@register_filter
|
||||
@define_filter
|
||||
def v8_subtest(series, name):
|
||||
"""
|
||||
|
@ -190,6 +248,7 @@ def v8_subtest(series, name):
|
|||
return reference[name] / geometric_mean(series)
|
||||
|
||||
|
||||
@register_filter
@define_filter
def responsiveness_Metric(val_list):
    """Sum of squared samples, scaled down by 1e6.

    Squaring makes long pauses dominate the metric; the division
    rescales the total into more convenient units.
    """
    return sum(float(value) * float(value) / 1000000.0 for value in val_list)
|
||||
|
|
|
@ -29,6 +29,8 @@ def test_dict():
|
|||
|
||||
class Test(object):
|
||||
"""abstract base class for a Talos test case"""
|
||||
__test__ = False # not pytest
|
||||
|
||||
cycles = None # number of cycles
|
||||
keys = []
|
||||
desktop = True
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
here = os.path.realpath(__file__)
# Sibling "tests" directory of the package containing this file.
__TESTS_DIR = os.path.join(os.path.dirname(os.path.dirname(here)), 'tests')


def remove_develop_files(starting_dir=__TESTS_DIR):
    """Recursively delete every ``*.develop`` file under *starting_dir*."""
    for entry in os.listdir(starting_dir):
        path = os.path.join(starting_dir, entry)
        if entry.endswith('.develop') and os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            # Descend into subdirectories.
            remove_develop_files(path)
|
|
@ -0,0 +1,6 @@
|
|||
[DEFAULT]
|
||||
subsuite = talos
|
||||
|
||||
[test_config.py]
|
||||
[test_ffsetup.py]
|
||||
[test_test.py]
|
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,90 @@
|
|||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
|
||||
import mock
|
||||
import mozunit
|
||||
|
||||
from talos.ffsetup import FFSetup
|
||||
|
||||
|
||||
class TestFFSetup(object):
    # Tests for FFSetup (browser environment / profile preparation).

    def setup_method(self, method):
        # Minimal configs: just enough keys for FFSetup's constructor.
        self.ffsetup = FFSetup(
            { # browser_config
                "env": {},
                "symbols_path": "",
                "preferences": {},
                "webserver": "",
                "extensions": []
            },
            { #test_config
                "preferences": {},
                "extensions": [],
                "profile": None
            }
        )

    # setup proxy logger

    def test_clean(self):
        # clean() must remove the temp dir and also clean the gecko
        # profile when one is attached.
        # tmp dir removed
        assert self.ffsetup._tmp_dir is not None
        assert os.path.exists(self.ffsetup._tmp_dir) is True

        self.ffsetup.clean()

        assert self.ffsetup._tmp_dir is not None
        assert os.path.exists(self.ffsetup._tmp_dir) is False

        # gecko profile also cleaned
        gecko_profile = mock.Mock()
        self.ffsetup.gecko_profile = gecko_profile

        self.ffsetup.clean()

        assert gecko_profile.clean.called is True

    # NOTE(review): the remaining cases are sketched but not yet
    # implemented; kept as a to-do list for future coverage.
    # def test_as_context_manager(self):
    #     self.ffsetup._init_env = mock.Mock()
    #     self.ffsetup._init_profile = mock.Mock()
    #     self.ffsetup._run_profile = mock.Mock()
    #     self.ffsetup._init_gecko_profile = mock.Mock()
    #
    #     with self.ffsetup as setup:
    #         # env initiated
    #         self.assertIsNotNone(setup.env)
    #         # profile initiated
    #         self.assertTrue(setup._init_profile.called)
    #         # gecko profile initiated
    #
    #     # except raised
    #     pass
    #
    # def test_environment_init(self):
    #     # self.env not empty
    #     # browser_config env vars in self.env
    #     # multiple calls return same self.env
    #     pass
    #
    # def test_profile_init(self):
    #     # addons get installed
    #     # webextensions get installed
    #     # preferences contain interpolated values
    #     # profile path is added
    #     pass
    #
    # def test_run_profile(self):
    #     # exception raised
    #     # browser process launched
    #     pass
    #
    # def test_gecko_profile_init(self):
    #     # complains on not provided upload_dir
    #     # self.gecko_profile not None
    #     pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
mozunit.main()
|
|
@ -0,0 +1,206 @@
|
|||
from __future__ import absolute_import
|
||||
|
||||
import mozunit
|
||||
import pytest
|
||||
|
||||
from talos.test import Test, TsBase, ts_paint
|
||||
from talos.test import register_test
|
||||
from talos.test import test_dict
|
||||
|
||||
|
||||
# Intentionally empty Test subclass; no docstring, because description()
# falls back to a default string that test_class_doc relies on.
class BasicTestA(Test):
    pass
|
||||
|
||||
|
||||
# Second empty fixture class; registered (and thereby consumed) by
# Test_register_test.test_class_registered.
class BasicTestB(Test):
    pass
|
||||
|
||||
|
||||
class BasicTestC(Test):
    """basic description"""

    # Attributes exposed through Test.items(); null values are expected
    # to be filtered out by items().
    keys = ['nonnull_attrib', 'null_attrib']

    nonnull_attrib = 'value'
    null_attrib = None
|
||||
|
||||
|
||||
# Deliberately NOT a Test subclass: register_test() must reject it.
class NotATest(object):
    pass
|
||||
|
||||
|
||||
class Test_register_test(object):
    """Tests for the register_test decorator factory."""

    def test_same_instance_returned(self):
        # The decorator must hand back the very class it was given.
        assert register_test()(BasicTestA) is BasicTestA

    def test_class_registered(self):
        registry = test_dict()
        decorator = register_test()

        # Registering adds the class to the shared registry by name.
        decorator(BasicTestB)
        assert 'BasicTestB' in registry
        assert BasicTestB in registry.values()

        # Registering the same class twice is rejected.
        with pytest.raises(AssertionError):
            decorator(BasicTestB)

        # Classes that are not Test subclasses are rejected as well.
        with pytest.raises(AssertionError):
            decorator(NotATest)
|
||||
|
||||
|
||||
class TestTest(object):
    """Tests for the base Test class behaviour."""

    def test_same_class_name(self):
        """name() reports the class name itself."""
        assert BasicTestA.name() == 'BasicTestA'

    def test_class_doc(self):
        """description() has a fallback and honours __doc__."""
        assert BasicTestA.description() is not None
        assert BasicTestC.description() == 'basic description'

    def test_init(self):
        """Keyword arguments to __init__ become instance attributes."""
        basic_test = BasicTestA(new_attrib_a='value_a', new_attrib_b='value_b')
        assert basic_test.new_attrib_a == 'value_a'
        assert basic_test.new_attrib_b == 'value_b'

    def test_update(self):
        """update() sets new attributes and can be called repeatedly."""
        basic_test = BasicTestA()
        basic_test.update(new_attrib_a='value_a', new_attrib_b='value_b')

        assert basic_test.new_attrib_a == 'value_a'
        assert basic_test.new_attrib_b == 'value_b'

        basic_test.update(new_attrib_c='value_c')
        assert basic_test.new_attrib_c == 'value_c'

    def test_items(self):
        """items() yields (key, value) pairs for non-null registered keys."""
        basic_test = BasicTestC()

        # returns iterable
        try:
            iter(basic_test.items())
        except TypeError:
            pytest.fail('Test.items() did not return iterator')

        # contains (key, value) tuples
        try:
            key, val = basic_test.items()[0]
        except ValueError:
            pytest.fail('Test.items() did not contain tuples')

        # null values are filtered out
        assert ('nonnull_attrib', 'value') in basic_test.items()
        assert ('null_attrib', None) not in basic_test.items()

        # not overridden Test instance exposes only its name
        test_instance = Test()
        assert test_instance.items() == [('name', 'Test')]

        # attributes outside `keys` never show up
        test_instance = Test(unregistered_attr='value')
        assert ('unregistered_attr', 'value') not in test_instance.items()

        test_instance = Test()
        test_instance.update(keys=['cycles', 'desktop', 'lower_is_better'])
        assert dict(test_instance.items()) == {
            'name': 'Test', 'desktop': True, 'lower_is_better': True}

        test_instance = Test()
        test_instance.update(new_attrib='some')
        assert ('new_attrib', 'some') not in test_instance.items()

        test_instance = Test()
        test_instance.update(keys=['new_attrib'], new_attrib='value')
        assert dict(test_instance.items()) == {'name': 'Test',
                                               'new_attrib': 'value'}

        # constructor kwargs and update() behave the same way
        test_instance = Test(cycles=20, desktop=False)
        assert test_instance.cycles == 20
        assert test_instance.desktop is False  # was `== False` (E712)

        test_instance = Test()
        test_instance.update(cycles=20, desktop=False)
        assert test_instance.cycles == 20
        assert test_instance.desktop is False  # was `== False` (E712)
|
||||
|
||||
|
||||
class TestTsBase(object):
    """Tests for the TsBase startup-test base class."""

    # Every key that TsBase declares out of the box.
    ts_base_registered_keys = {
        'url', 'url_timestamp', 'timeout', 'cycles', 'shutdown',
        'profile_path', 'gecko_profile', 'gecko_profile_interval',
        'gecko_profile_entries', 'gecko_profile_startup', 'preferences',
        'xperf_counters', 'xperf_providers', 'xperf_user_providers',
        'xperf_stackwalk', 'tpmozafterpaint', 'fnbpaint', 'profile',
        'firstpaint', 'userready', 'testeventmap', 'base_vs_ref',
        'extensions', 'filters', 'setup', 'cleanup', 'webextensions',
        'reinstall',
    }

    def setup_method(self):
        self.test_instance = TsBase()

    def test_no_unknown_keys_are_somehow_added_alongside_registered_ones(self):
        assert set(self.test_instance.keys) == self.ts_base_registered_keys

        # update() must not grow the registered key list.
        self.test_instance.update(attribute_one='value', attribute_two='value')
        assert set(self.test_instance.keys) == self.ts_base_registered_keys

    def test_nonnull_keys_show_up(self):
        expected = {
            'name': 'TsBase',
            'filters': self.test_instance.filters,
        }
        assert dict(self.test_instance.items()) == expected

        # A newly set non-null key appears in items() as well.
        self.test_instance.update(timeout=500)
        expected['timeout'] = 500
        assert dict(self.test_instance.items()) == expected
|
||||
|
||||
|
||||
class Test_ts_paint(object):
    """Tests for the ts_paint test definition."""

    def test_test_nonnull_keys_show_up(self):
        # Only non-null registered keys should be reported by items().
        expected_keys = {
            'name', 'cycles', 'timeout', 'gecko_profile_startup',
            'gecko_profile_entries', 'url', 'shutdown', 'xperf_counters',
            'filters', 'tpmozafterpaint',
        }
        reported_keys = {key for key, _ in ts_paint().items()}
        assert reported_keys == expected_keys
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
mozunit.main()
|
|
@ -1,192 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
test talos browser output parsing
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from talos.results import BrowserLogResults
|
||||
from talos.results import PageloaderResults
|
||||
from talos.utils import TalosError
|
||||
|
||||
here = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
class TestBrowserOutput(unittest.TestCase):
    """Parsing of Talos browser output in its supported formats."""

    def test_ts_format(self):
        """parse a ts ('tsformat') output file"""

        # output file
        browser_ts = os.path.join(here, 'browser_output.ts.txt')

        # parse the results
        browser_log = BrowserLogResults(browser_ts)

        # ensure the results meet what we expect
        self.assertEqual(browser_log.format, 'tsformat')
        self.assertEqual(browser_log.browser_results.strip(), '392')
        self.assertEqual(browser_log.startTime, 1333663595953)
        self.assertEqual(browser_log.endTime, 1333663596551)

    def test_tsvg_format(self):
        """parse a tsvg ('tpformat') output file"""

        # output file
        browser_tsvg = os.path.join(here, 'browser_output.tsvg.txt')

        # parse the results
        browser_log = BrowserLogResults(browser_tsvg)

        # ensure the results meet what we expect
        self.assertEqual(browser_log.format, 'tpformat')
        self.assertEqual(browser_log.startTime, 1333666702130)
        self.assertEqual(browser_log.endTime, 1333666702743)

        # we won't validate the exact string because it is long
        raw_report = browser_log.browser_results.strip()
        # BUG FIX: these two checks previously discarded their results;
        # assert them so a malformed report actually fails the test.
        self.assertTrue(raw_report.startswith('_x_x_mozilla_page_load'))
        self.assertTrue(
            raw_report.endswith('|11;hixie-007.xml;1629;1651;1648;1652;1649'))

        # but we will ensure that it is parseable
        pageloader_results = PageloaderResults(raw_report)
        self.assertEqual(len(pageloader_results.results), 12)
        indices = [i['index'] for i in pageloader_results.results]
        self.assertEqual(indices, range(12))

        # test hixie-001.xml just as a spot-check
        hixie_001 = pageloader_results.results[5]
        expected_values = [45643, 14976, 17807, 14971, 17235]
        self.assertEqual(hixie_001['runs'], expected_values)
        self.assertEqual(hixie_001['page'], 'hixie-001.xml')

    def test_garbage(self):
        """
        send in garbage input and ensure the output is the
        inability to find the report
        """

        garbage = "hjksdfhkhasdfjkhsdfkhdfjklasd"
        self.compare_error_message(garbage, "Could not find report")

    def test_missing_end_report(self):
        """what if you're not done with a report?"""
        garbage = "hjksdfhkhasdfjkhsdfkhdfjklasd"

        # `raw` avoids shadowing the `input` builtin
        raw = self.start_report()
        raw += garbage
        self.compare_error_message(raw, "Could not find end token: '__end_report'")

    def test_double_end_report(self):
        """double end report tokens"""

        garbage = "hjksdfhkhasdfjkhsdfkhdfjklasd"
        raw = self.start_report() + garbage + self.end_report() + self.end_report()
        self.compare_error_message(raw, "Unmatched number of tokens")

    def test_end_report_before_start_report(self):
        """the end report token occurs before the start report token"""

        garbage = "hjksdfhkhasdfjkhsdfkhdfjklasd"
        raw = self.end_report() + garbage + self.start_report()
        self.compare_error_message(raw,
                                   "End token '%s' occurs before start token" %
                                   self.end_report())

    def test_missing_timestamps(self):
        """what if the timestamps are missing?"""

        # make a bogus report but missing the timestamps
        garbage = "hjksdfhkhasdfjkhsdfkhdfjklasd"
        raw = self.start_report() + garbage + self.end_report()

        # it will fail
        self.compare_error_message(raw, "Could not find startTime in browser output")

    def test_wrong_order(self):
        """what happens if you mix up the token order?"""

        # i've secretly put the AfterTerminationTimestamp before
        # the BeforeLaunchTimestamp
        # Let's see if the parser notices
        bad_report = """__start_report392__end_report

Failed to load native module at path '/home/jhammel/firefox/components/libmozgnome.so':
(80004005) libnotify.so.1: cannot open shared object file: No such file or directory
Could not read chrome manifest
'file:///home/jhammel/firefox/extensions/%7B972ce4c6-7e08-4474-a285-3208198ce6fd%7D/chrome.manifest'.
[JavaScript Warning: "Use of enablePrivilege is deprecated.
Please use code that runs with the system principal (e.g. an extension) instead.
" {file: "http://localhost:15707/startup_test/startup_test.html?begin=1333663595557" line: 0}]
__startTimestamp1333663595953__endTimestamp
__startAfterTerminationTimestamp1333663596551__endAfterTerminationTimestamp
__startBeforeLaunchTimestamp1333663595557__endBeforeLaunchTimestamp
"""

        self.compare_error_message(bad_report, "] found before " +
                                   "('__startBeforeLaunchTimestamp', " +
                                   "'__endBeforeLaunchTimestamp') " +
                                   "[character position:")

    def test_multiple_reports(self):
        """you're only allowed to have one report in a file"""

        # this one works fine
        good_report = """__start_report392__end_report

Failed to load native module at path '/home/jhammel/firefox/components/libmozgnome.so':
(80004005) libnotify.so.1: cannot open shared object file: No such file or directory
Could not read chrome manifest
'file:///home/jhammel/firefox/extensions/%7B972ce4c6-7e08-4474-a285-3208198ce6fd%7D/chrome.manifest'.
[JavaScript Warning: "Use of enablePrivilege is deprecated.
Please use code that runs with the system principal (e.g. an extension) instead.
" {file: "http://localhost:15707/startup_test/startup_test.html?begin=1333663595557" line: 0}]
__startTimestamp1333663595953__endTimestamp
__startBeforeLaunchTimestamp1333663595557__endBeforeLaunchTimestamp
__startAfterTerminationTimestamp1333663596551__endAfterTerminationTimestamp
"""

        # but there's no hope for this one
        bad_report = good_report + good_report  # interesting math

        self.compare_error_message(bad_report, "Multiple matches for %s,%s" %
                                   (self.start_report(), self.end_report()))

    def start_report(self):
        """return a start report token"""
        return BrowserLogResults.report_tokens[0][1][0]  # start token

    def end_report(self):
        """return an end report token"""
        return BrowserLogResults.report_tokens[0][1][-1]  # end token

    def compare_error_message(self, browser_log, substr):
        """
        ensures that exceptions give correct error messages
        - browser_log : a browser log file
        - substr : substring of the error message
        """

        try:
            BrowserLogResults(results_raw=browser_log)
        except TalosError as e:
            # BUG FIX: a leftover `pdb.set_trace()` used to fire here on
            # mismatch, hanging automated runs; just assert the message.
            self.assertTrue(substr in str(e))
        else:
            # BUG FIX: previously a missing exception passed silently.
            self.fail("TalosError with %r not raised" % substr)
|
||||
|
||||
|
||||
class TestTalosError(unittest.TestCase):
    """Tests for the TalosError exception class."""

    def test_browser_log_results(self):
        # Passing an invalid value for results_raw must raise TalosError.
        with self.assertRaises(TalosError):
            BrowserLogResults(results_raw="__FAIL<bad test>__FAIL")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -1,81 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
test talos' filter module:
|
||||
|
||||
http://hg.mozilla.org/build/talos/file/tip/talos/filter.py
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import unittest
|
||||
|
||||
import talos.filter
|
||||
|
||||
|
||||
class TestFilter(unittest.TestCase):
    """Tests for talos' filter module."""

    data = range(30)  # test data

    def test_ignore(self):
        """test the ignore filter"""
        # a bit of a stub sanity test for a single filter

        filtered = talos.filter.ignore_first(self.data)
        self.assertEqual(filtered, self.data[1:])

        filtered = talos.filter.ignore_first(self.data, 10)
        self.assertEqual(filtered, self.data[10:])

        # short series won't be filtered
        filtered = talos.filter.ignore_first(self.data, 50)
        self.assertEqual(filtered, self.data)

    def test_getting_filters(self):
        """test getting a list of filters from a string"""

        filter_names = ['ignore_max', 'ignore_max', 'max']

        # get the filter functions
        filters = talos.filter.filters(*filter_names)
        self.assertEqual(len(filter_names), len(filters))
        for filter_func in filters:
            # BUG FIX: the old assertion `assertTrue(self, hasattr(...))`
            # passed `self` as the tested expression (always truthy) and
            # the hasattr result as the message, so it never checked
            # anything. Actually verify callability now.
            self.assertTrue(callable(filter_func))

        # apply them on the data
        filtered = talos.filter.apply(self.data, filters)
        self.assertEqual(filtered, 27)

    def test_parse(self):
        """test the filter name parse function"""

        # an example with no arguments
        parsed = talos.filter.parse('mean')
        self.assertEqual(parsed, ['mean', []])

        # an example with one integer argument
        parsed = talos.filter.parse('ignore_first:10')
        self.assertEqual(parsed, ['ignore_first', [10]])
        self.assertEqual(type(parsed[1][0]), int)
        self.assertNotEqual(type(parsed[1][0]), float)

        # an example with several arguments

        # temporarily add foo
        # value is lambda function to mimic filter_dict key:value pair
        talos.filter.scalar_filters['foo'] = lambda *args: args
        parsed = talos.filter.parse('foo:10.1,2,5.0,6.')
        self.assertEqual(parsed, ['foo', [10.1, 2, 5.0, 6.0]])
        for index in (2, 3):
            self.assertEqual(type(parsed[1][index]), float)
            self.assertNotEqual(type(parsed[1][index]), int)

        # an example that should fail
        self.assertRaises(ValueError, talos.filter.parse, 'foo:bar')
        self.assertRaises(ValueError, talos.filter.parse, 'foo:1,')

        # delete foo again
        del talos.filter.scalar_filters['foo']
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -1,163 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
test talos' heavy module:
|
||||
|
||||
http://hg.mozilla.org/build/talos/file/tip/talos/heavy.py
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import unittest
|
||||
import tempfile
|
||||
import shutil
|
||||
import datetime
|
||||
import contextlib
|
||||
import os
|
||||
import time
|
||||
|
||||
import talos.heavy
|
||||
|
||||
|
||||
archive = os.path.join(os.path.dirname(__file__), 'profile.tgz')
|
||||
archive_size = os.stat(archive).st_size
|
||||
|
||||
|
||||
@contextlib.contextmanager
def mock_requests(**kw):
    # Context manager that monkey-patches the `requests` module as seen
    # by talos.heavy: each keyword maps an attribute name (e.g. "head",
    # "get") to a replacement callable. The same replacements are also
    # installed on a stub Session class so code going through a session
    # sees them too. Everything is restored on exit.
    class Session:
        def mount(self, *args, **kw):
            # no-op: tests never need transport adapters
            pass

    # NOTE(review): 'Session' is injected into kw, so the loop below also
    # saves/restores talos.heavy.requests.Session itself.
    kw['Session'] = Session
    old = {}
    for meth, func in kw.items():
        curr = getattr(talos.heavy.requests, meth)
        old[meth] = curr  # remember original for restoration
        setattr(talos.heavy.requests, meth, func)
        setattr(Session, meth, func)
    try:
        yield
    finally:
        # restore every patched attribute, even if the body raised
        for meth, func in old.items():
            setattr(talos.heavy.requests, meth, func)
|
||||
|
||||
|
||||
class _Response(object):
|
||||
def __init__(self, code, headers=None, file=None):
|
||||
if headers is None:
|
||||
headers = {}
|
||||
self.headers = headers
|
||||
self.status_code = code
|
||||
self.file = file
|
||||
|
||||
def raise_for_status(self):
|
||||
pass
|
||||
|
||||
def iter_content(self, chunk_size):
|
||||
with open(self.file, 'rb') as f:
|
||||
yield f.read(chunk_size)
|
||||
|
||||
|
||||
class Logger:
    """Collects log messages in memory so tests can inspect them."""

    def __init__(self):
        self.data = []

    def info(self, msg):
        # Record the message instead of emitting it anywhere.
        self.data.append(msg)
|
||||
|
||||
|
||||
class TestFilter(unittest.TestCase):
    # NOTE(review): despite the name, these are tests for talos.heavy
    # (heavy-profile download helpers), not for talos.filter.

    def setUp(self):
        # fresh scratch dir per test; swap talos.heavy's logger for an
        # in-memory recorder so messages can be asserted on afterwards
        self.temp = tempfile.mkdtemp()
        self.logs = talos.heavy.LOG.logger = Logger()

    def tearDown(self):
        shutil.rmtree(self.temp)

    def test_profile_age(self):
        """test profile_age function"""
        # a freshly created directory is 0 days old
        days = talos.heavy.profile_age(self.temp)
        self.assertEqual(days, 0)

        # with a reference date 8 days ahead, the reported age is 8
        _8_days = datetime.datetime.now() + datetime.timedelta(days=8)
        days = talos.heavy.profile_age(self.temp, _8_days)
        self.assertEqual(days, 8)

    def test_directory_age(self):
        """make sure it detects changes in files in subdirs"""
        with open(os.path.join(self.temp, 'file'), 'w') as f:
            f.write('xxx')

        current_age = talos.heavy._recursive_mtime(self.temp)
        # mtime granularity can be a full second on some filesystems
        time.sleep(1.1)

        with open(os.path.join(self.temp, 'file'), 'w') as f:
            f.write('----')

        self.assertTrue(current_age < talos.heavy._recursive_mtime(self.temp))

    def test_follow_redirect(self):
        """test follow_redirect function"""
        _8_days = datetime.datetime.now() + datetime.timedelta(days=8)
        _8_days = _8_days.strftime('%a, %d %b %Y %H:%M:%S UTC')

        # two 303 redirects, then a 200 carrying a Last-Modified header
        resps = [_Response(303, {'Location': 'blah'}),
                 _Response(303, {'Location': 'bli'}),
                 _Response(200, {'Last-Modified': _8_days})]

        class Counter:
            c = 0

        # the mutable default keeps the counter across calls: each HEAD
        # request returns the next canned response
        def _head(url, curr=Counter()):
            curr.c += 1
            return resps[curr.c]

        with mock_requests(head=_head):
            loc, lm = talos.heavy.follow_redirects('https://example.com')
            days = talos.heavy.profile_age(self.temp, lm)
            self.assertEqual(days, 8)

    def _test_download(self, age):
        # drive download_profile() with canned HEAD/GET responses whose
        # Last-Modified header is `age` days in the future

        def _days(num):
            d = datetime.datetime.now() + datetime.timedelta(days=num)
            return d.strftime('%a, %d %b %Y %H:%M:%S UTC')

        resps = [_Response(303, {'Location': 'blah'}),
                 _Response(303, {'Location': 'bli'}),
                 _Response(200, {'Last-Modified': _days(age)})]

        class Counter:
            c = 0

        def _head(url, curr=Counter()):
            curr.c += 1
            return resps[curr.c]

        def _get(url, *args, **kw):
            # serve the bundled profile.tgz fixture
            return _Response(200, {'Last-Modified': _days(age),
                                   'content-length': str(archive_size)},
                             file=archive)

        with mock_requests(head=_head, get=_get):
            target = talos.heavy.download_profile('simple',
                                                  profiles_dir=self.temp)
            profile = os.path.join(self.temp, 'simple')
            self.assertTrue(os.path.exists(profile))
            return target

    def test_download_profile(self):
        """test downloading heavy profile"""
        # a 12 days old profile gets updated
        self._test_download(12)

        # a 8 days two
        self._test_download(8)

        # a 2 days sticks
        self._test_download(2)
        self.assertTrue("fresh enough" in self.logs.data[-2])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -1,82 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
"""
|
||||
test talos results parsing
|
||||
|
||||
http://hg.mozilla.org/build/talos/file/tip/talos/results.py
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import unittest
|
||||
|
||||
import talos.filter
|
||||
import talos.results
|
||||
|
||||
# example page load test results string
|
||||
results_string = """_x_x_mozilla_page_load
|
||||
_x_x_mozilla_page_load_details
|
||||
|i|pagename|runs|
|
||||
|0;gearflowers.svg;74;65;68;66;62
|
||||
|1;composite-scale.svg;43;44;35;41;41
|
||||
|2;composite-scale-opacity.svg;19;16;19;19;21
|
||||
|3;composite-scale-rotate.svg;18;19;20;20;19
|
||||
|4;composite-scale-rotate-opacity.svg;18;17;17;17;19
|
||||
|5;hixie-001.xml;71836;15057;15063;57436;15061
|
||||
|6;hixie-002.xml;53940;15057;38246;55323;31818
|
||||
|7;hixie-003.xml;5027;5026;13540;31503;5031
|
||||
|8;hixie-004.xml;5050;5054;5053;5054;5055
|
||||
|9;hixie-005.xml;4568;4569;4562;4545;4567
|
||||
|10;hixie-006.xml;5090;5165;5054;5015;5077
|
||||
|11;hixie-007.xml;1628;1623;1623;1617;1622
|
||||
"""
|
||||
|
||||
|
||||
class TestPageloaderResults(unittest.TestCase):
    # Parsing and filtering of pageloader (tp-style) results strings.

    def test_parsing(self):
        """test our ability to parse results data"""
        results = talos.results.PageloaderResults(results_string)

        # ensure you got all of them
        self.assertEqual(len(results.results), 12)

        # test the indices
        indices = [i['index'] for i in results.results]
        # NOTE(review): comparing against range() directly is a Python 2
        # idiom (range returns a list there)
        self.assertEqual(indices, range(12))

        # test some pages
        pages = [i['page'] for i in results.results]
        comparison = ['hixie-00%d.xml' % i for i in range(1, 8)]
        self.assertEqual(pages[-len(comparison):], comparison)

        # test a few values
        last = [1628., 1623., 1623, 1617., 1622.]
        self.assertEqual(results.results[-1]['runs'], last)
        first = [74., 65., 68., 66., 62.]
        self.assertEqual(results.results[0]['runs'], first)

    def test_filter(self):
        """test PageloaderResults.filter function"""

        # parse the data
        results = talos.results.PageloaderResults(results_string)

        # apply some filters: drop the first two runs, then take the
        # median of what remains
        filters = [[talos.filter.ignore_first, [2]], [talos.filter.median]]
        filtered = results.filter(*filters)
        self.assertEqual(filtered[0][0], 66.)
        self.assertEqual(filtered[-1][0], 1622.)

        # apply some different filters: drop the max, then take the max
        # of what remains
        filters = [[talos.filter.ignore_max, []], [max, []]]
        filtered = results.filter(*filters)
        self.assertEqual(filtered[0][0], 68.)
        self.assertEqual(filtered[-1][0], 1623.)
|
||||
|
||||
|
||||
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
@ -1,158 +0,0 @@
|
|||
from __future__ import absolute_import, print_function
|
||||
|
||||
import json
|
||||
import unittest
|
||||
|
||||
from talos import talosconfig
|
||||
from talos.configuration import YAML
|
||||
|
||||
# globals
ffox_path = 'test/path/to/firefox'
# command line handed to generateTalosConfig in the tests below
command_args = [ffox_path,
                '-profile',
                'pathtoprofile',
                '-tp',
                'pathtotpmanifest',
                '-tpchrome',
                '-tpmozafterpaint',
                '-tpnoisy',
                '-rss',
                '-tpcycles',
                '1',
                '-tppagecycles',
                '1']

# Resolve the JSON fixtures relative to this module rather than the current
# working directory, so the tests also pass when pytest/unittest is invoked
# from another directory (the bare relative paths only worked when the test
# was run from its own directory).
import os
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, "test_talosconfig_browser_config.json")) as json_browser_config:
    browser_config = json.load(json_browser_config)
with open(os.path.join(here, "test_talosconfig_test_config.json")) as json_test_config:
    test_config = json.load(json_test_config)
|
||||
|
||||
|
||||
class TestWriteConfig(unittest.TestCase):
    """Tests for talosconfig.writeConfigFile serialization."""

    def test_writeConfigFile(self):
        obj = dict(some=123, thing='456', other=789)

        # only the requested keys are written out
        # (assertEqual: assertEquals is a deprecated alias, removed in
        # modern Python)
        self.assertEqual(
            json.loads(talosconfig.writeConfigFile(obj, ('some', 'thing'))),
            dict(some=123, thing='456')
        )

        # test without keys: the whole object is written
        self.assertEqual(
            json.loads(talosconfig.writeConfigFile(obj, None)),
            obj
        )
||||
|
||||
|
||||
class TalosConfigUnitTest(unittest.TestCase):
    """
    A class inheriting from unittest.TestCase to test the generateTalosConfig function.
    """

    def validate(self, var1, var2):
        # Check that the generated value matches the expected one and FAIL
        # the test when it does not.  The previous implementation only
        # printed the mismatch and returned, so a wrong value could never
        # actually fail the suite.
        self.assertEqual(var1, var2,
                         "input '%s' != expected '%s'" % (var1, var2))

    def test_talosconfig(self):
        # Simulates a call to generateTalosConfig in talosconfig.py, then
        # checks that the content written to the bcontroller yaml file is
        # as expected.
        #
        # Generate the config up front so this test does not depend on the
        # execution order of other tests in this class having left the yaml
        # file behind; copies keep the module-level configs pristine.
        talosconfig.generateTalosConfig(command_args,
                                        browser_config.copy(),
                                        test_config.copy())

        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])
        self.validate(content['command'],
                      "test/path/to/firefox " +
                      "-profile " +
                      "pathtoprofile " +
                      "-tp " +
                      "pathtotpmanifest " +
                      "-tpchrome " +
                      "-tpmozafterpaint " +
                      "-tpnoisy " +
                      "-rss " +
                      "-tpcycles " +
                      "1 " +
                      "-tppagecycles " +
                      "1")
        self.validate(content['child_process'], "plugin-container")
        self.validate(content['process'], "firefox.exe")
        self.validate(content['browser_wait'], 5)
        self.validate(content['test_timeout'], 1200)
        self.validate(content['browser_log'], "browser_output.txt")
        self.validate(content['browser_path'], "test/path/to/firefox")
        self.validate(content['error_filename'], "pathtoerrorfile")
        self.validate(content['xperf_path'],
                      "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")
        self.validate(content['buildid'], 20131205075310)
        self.validate(content['sourcestamp'], "39faf812aaec")
        self.validate(content['repository'], "http://hg.mozilla.org/releases/mozilla-release")
        self.validate(content['title'], "qm-pxp01")
        self.validate(content['testname'], "tp5n")
        self.validate(content['xperf_providers'], ['PROC_THREAD',
                                                   'LOADER',
                                                   'HARD_FAULTS',
                                                   'FILENAME',
                                                   'FILE_IO',
                                                   'FILE_IO_INIT'])
        self.validate(content['xperf_user_providers'],
                      ['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])
        self.validate(content['xperf_stackwalk'],
                      ['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'])
        self.validate(content['processID'], "None")
        self.validate(content['approot'], "test/path/to")

    def test_errors(self):
        # Tests that keys missing from the input configs are also absent
        # (raise KeyError) in the generated yaml content.

        # xperf_path missing from the browser config
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del browser_config_copy['xperf_path']
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        # the KeyError is raised by the content[...] subscript itself
        with self.assertRaises(KeyError):
            self.validate(content['xperf_path'],
                          "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")

        # testname is also dropped when xperf_path is missing
        with self.assertRaises(KeyError):
            self.validate(content['testname'], "tp5n")

        # xperf_providers missing from the test config
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del test_config_copy['xperf_providers']
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        # xperf_providers and every key derived from it must be absent
        with self.assertRaises(KeyError):
            self.validate(content['xperf_providers'], ['PROC_THREAD', 'LOADER', 'HARD_FAULTS',
                                                       'FILENAME', 'FILE_IO', 'FILE_IO_INIT'])

        with self.assertRaises(KeyError):
            self.validate(content['xperf_user_providers'],
                          ['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])

        with self.assertRaises(KeyError):
            self.validate(content['xperf_stackwalk'],
                          ['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'])

        with self.assertRaises(KeyError):
            self.validate(content['processID'], "None")

        with self.assertRaises(KeyError):
            self.validate(content['approot'], "test/path/to")
||||
|
||||
|
||||
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
@ -1,56 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
"""
|
||||
test URL parsing; see
|
||||
https://bugzilla.mozilla.org/show_bug.cgi?id=793875
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import unittest
|
||||
|
||||
import talos.utils
|
||||
|
||||
|
||||
class TestURLParsing(unittest.TestCase):
    """Tests for talos.utils.urlsplit (see bug 793875)."""

    def test_http_url(self):
        """test parsing an HTTP URL"""
        parts = talos.utils.urlsplit('https://www.mozilla.org/en-US/about/')
        self.assertEqual(
            parts, ['https', 'www.mozilla.org', '/en-US/about/', '', ''])

    def test_file_url(self):
        """test parsing file:// URLs"""
        # unix-like file path
        self.assertEqual(
            talos.utils.urlsplit('file:///foo/bar'),
            ['file', '', '/foo/bar', '', ''])

        # windows-like file path
        self.assertEqual(
            talos.utils.urlsplit(r'file://c:\foo\bar'),
            ['file', '', r'c:\foo\bar', '', ''])

    def test_implicit_file_url(self):
        """
        test parsing URLs with no scheme, which by default are assumed
        to be file:// URLs
        """
        self.assertEqual(
            talos.utils.urlsplit('/foo/bar'),
            ['file', '', '/foo/bar', '', ''])
||||
|
||||
|
||||
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
@ -1,43 +0,0 @@
|
|||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from talos import utils
|
||||
|
||||
|
||||
class TestTimer(unittest.TestCase):
    """Tests for utils.Timer elapsed-time formatting."""

    def test_timer(self):
        timer = utils.Timer()
        timer._start_time -= 3  # pretend the timer started three seconds ago
        # assertEqual: assertEquals is a deprecated alias, removed in
        # modern Python
        self.assertEqual(timer.elapsed(), '00:00:03')
||||
|
||||
|
||||
class TestRestoreEnv(unittest.TestCase):
    """Tests for utils.restore_environment_vars."""

    def test_basic(self):
        # a name that is certainly not set in the ambient environment
        name = 'THIS_IS_A_ENV_VAR_NOT_USED'
        self.assertNotIn(name, os.environ)
        # a variable set inside the context manager must be gone afterwards
        with utils.restore_environment_vars():
            os.environ[name] = '1'
        self.assertNotIn(name, os.environ)
||||
|
||||
|
||||
class TestInterpolate(unittest.TestCase):
    """Tests for utils.interpolate placeholder substitution."""

    def test_interpolate_talos_is_always_defines(self):
        # ${talos} always expands to the talos directory (utils.here)
        # assertEqual: assertEquals is a deprecated alias, removed in
        # modern Python
        self.assertEqual(utils.interpolate('${talos}'), utils.here)

    def test_interpolate_custom_placeholders(self):
        # extra keyword placeholders are substituted; unused ones ignored
        self.assertEqual(utils.interpolate('${talos} ${foo} abc', foo='bar', unused=1),
                         utils.here + ' bar abc')
||||
|
||||
|
||||
class TestParsePref(unittest.TestCase):
    """Tests for utils.parse_pref string-to-typed-value conversion.

    assertEqual is used throughout: assertEquals is a deprecated alias,
    removed in modern Python.
    """

    def test_parse_string(self):
        # anything that is not an int or a bool stays a string
        self.assertEqual(utils.parse_pref('abc'), 'abc')

    def test_parse_int(self):
        self.assertEqual(utils.parse_pref('12'), 12)

    def test_parse_bool(self):
        self.assertEqual(utils.parse_pref('true'), True)
        self.assertEqual(utils.parse_pref('false'), False)
|
@ -1,66 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Tests for talos.xrestop
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import unittest
|
||||
|
||||
from talos.cmanager_linux import xrestop
|
||||
|
||||
# directory containing this test module; used to locate the static
# xrestop output fixture checked in next to it
here = os.path.dirname(os.path.abspath(__file__))
xrestop_output = os.path.join(here, 'xrestop_output.txt')
||||
|
||||
|
||||
class TestXrestop(unittest.TestCase):
    """Tests for talos.cmanager_linux.xrestop output parsing."""

    def test_parsing(self):
        """test parsing xrestop output from xrestop_output.txt"""

        class MockPopen(object):
            """
            stub class for subprocess.Popen

            We mock this to return a local static copy of xrestop output.
            This has the unfortunate nature of depending on implementation
            details.
            """
            def __init__(self, *args, **kwargs):
                self.returncode = 0

            def communicate(self):
                # close the fixture file deterministically instead of
                # relying on garbage collection
                with open(xrestop_output) as f:
                    stdout = f.read()
                return stdout, ''

        # monkey-patch subprocess.Popen; restore it in a finally block so
        # that a failing call or assertion cannot leak the patch into
        # other tests (the original restored it only after all assertions)
        Popen = subprocess.Popen
        subprocess.Popen = MockPopen
        try:
            # get the output
            output = xrestop()
        finally:
            subprocess.Popen = Popen

        # ensure that the parsed output is equal to what is in
        # xrestop_output.txt
        self.assertEqual(len(output), 7)  # seven windows with PIDs

        # the first window is Thunderbird
        pid = 2035  # thunderbird's pid
        self.assertTrue(pid in output)
        thunderbird = output[pid]
        self.assertEqual(thunderbird['index'], 0)
        self.assertEqual(thunderbird['total bytes'], '~4728761')

        # PID=1668 is a Terminal
        pid = 1668
        self.assertTrue(pid in output)
        terminal = output[pid]
        self.assertEqual(terminal['pixmap bytes'], '1943716')
||||
|
||||
|
||||
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
Загрузка…
Ссылка в новой задаче