Bug 1599099 - Generate in-tree perfdocs using ./mach lint --fix. r=octavian_negru,sparky

This patch adds the PerfDocs Generator. Some modifications to `run_perfdocs` were also made because verification must always occur before documentation generation, and generation only runs if verification passes.

Differential Revision: https://phabricator.services.mozilla.com/D57111

--HG--
extra : moz-landing-system : lando
alexandru.ionescu 2020-02-12 07:00:15 +00:00
Parent 4ac984054a
Commit 5b6ff5a709
15 changed files: 441 additions and 42 deletions

View file

@@ -30,6 +30,7 @@ categories:
- testing/geckodriver
- web-platform
- tools/fuzzing
- testing/perfdocs
l10n_doc:
- intl
- l10n

View file

@@ -0,0 +1,9 @@
###################
Performance Testing
###################
Below you can find links to the various documentation that exists for performance testing and the associated tests.
:doc:`raptor`
For more information please see this `wiki page <https://wiki.mozilla.org/TestEngineering/Performance>`_.

View file

@@ -0,0 +1,32 @@
######
Raptor
######
The following documents all testing we have for Raptor.
Benchmarks
----------
Benchmark tests
Desktop
-------
Used for testing page-load performance.
raptor-tp6
^^^^^^^^^^
Used for testing desktop page-load performance using Raptor.
Mobile
------
Used for testing page-load performance on Android.
Scenario
--------
Scenario tests
Unittests
---------
Unit tests
The methods for calling the tests can be found in the `Raptor wiki page <https://wiki.mozilla.org/TestEngineering/Performance/Raptor>`_.

View file

@@ -0,0 +1 @@
SPHINX_TREES['/testing/perfdocs'] = 'generated'

View file

@@ -9,7 +9,6 @@ suites:
description: "Used for testing page-load performance."
tests:
raptor-tp6: "Used for testing desktop page-load performance using Raptor."
browsertime-tp6: "Used for testing desktop page-load performance using browsertime."
mobile:
description: "Used for testing page-load performance on Android."
benchmarks:

View file

@@ -6,4 +6,4 @@ The following documents all testing we have for Raptor.
{documentation}
The methods for calling the tests can be found below.
The methods for calling the tests can be found in the `Raptor wiki page <https://wiki.mozilla.org/TestEngineering/Performance/Raptor>`_.

View file

@@ -16,7 +16,7 @@ def setup(root, **lintargs):
return 1
def lint(paths, config, logger, fix=None, **lintargs):
def lint(paths, config, logger, fix=False, **lintargs):
return perfdocs.run_perfdocs(
config, logger=logger, paths=paths, verify=True
config, logger=logger, paths=paths, generate=fix
)
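In other words, a minimal usage sketch of the new `fix` wiring (hedged: `paths`, `config`, and `logger` are placeholders for the objects the mozlint harness passes in at runtime; this is not part of the patch):
lint(paths, config, logger)            # ./mach lint -l perfdocs       -> run_perfdocs(..., generate=False), verify only
lint(paths, config, logger, fix=True)  # ./mach lint -l perfdocs --fix -> run_perfdocs(..., generate=True), verify then regenerate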

View file

@@ -28,6 +28,7 @@ class FrameworkGatherer(object):
self.workspace_dir = workspace_dir
self._yaml_path = yaml_path
self._suite_list = {}
self._test_list = {}
self._manifest_path = ''
self._manifest = None
@@ -105,3 +106,45 @@ class RaptorGatherer(FrameworkGatherer):
self._suite_list[s].append(fpath)
return self._suite_list
def _get_subtests_from_ini(self, manifest_path):
'''
Returns a list of (sub)tests from an ini file containing the test definitions.
:param str manifest_path: path to the ini file
:return list: the list of the tests
'''
test_manifest = TestManifest([manifest_path], strict=False)
test_list = test_manifest.active_tests(exists=False, disabled=False)
subtest_list = [subtest["name"] for subtest in test_list]
return subtest_list
def get_test_list(self):
'''
Returns a dictionary containing the tests in every suite ini file.
:return dict: A dictionary with the following structure: {
"suite_name": [
'raptor_test1',
'raptor_test2'
]
}
'''
if self._test_list:
return self._test_list
suite_list = self.get_suite_list()
# Iterate over each manifest path from suite_list[suite_name]
# and place the subtests into self._test_list under the same key
for suite_name, manifest_paths in suite_list.items():
if not self._test_list.get(suite_name):
self._test_list[suite_name] = []
for i, manifest_path in enumerate(manifest_paths, 1):
subtest_list = self._get_subtests_from_ini(manifest_path)
self._test_list[suite_name].extend(subtest_list)
if i == len(manifest_paths):
self._test_list[suite_name] = sorted(self._test_list[suite_name])
return self._test_list
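For reference, a hedged sketch of how the manifestparser calls above produce the per-suite test lists; the manifest path and test names below are hypothetical examples, not values taken from the patch:
from manifestparser import TestManifest

# Roughly what _get_subtests_from_ini() does for one suite manifest.
manifest = TestManifest(["testing/raptor/raptor/tests/raptor-tp6.ini"], strict=False)
subtests = [t["name"] for t in manifest.active_tests(exists=False, disabled=False)]

# get_test_list() then groups such lists by suite, e.g.:
# {
#     "desktop": ["raptor-tp6-amazon-firefox", "raptor-tp6-google-firefox"],
#     "mobile": ["raptor-tp6m-amazon-geckoview"],
# }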

View file

@@ -47,7 +47,7 @@ class Gatherer(object):
for information on the data structure.
'''
if self._perfdocs_tree:
return self.perfdocs_tree
return self._perfdocs_tree
else:
self.fetch_perfdocs_tree()
return self._perfdocs_tree
@@ -118,7 +118,7 @@ class Gatherer(object):
framework["yml_path"],
self.workspace_dir
)
framework["test_list"] = framework_gatherer.get_suite_list()
framework["test_list"] = framework_gatherer.get_test_list()
self._test_list.append(framework)
return framework

View file

@@ -0,0 +1,243 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import os
import re
import shutil
import tempfile
from perfdocs.logger import PerfDocLogger
from perfdocs.utils import (
are_dirs_equal,
read_file,
read_yaml,
save_file
)
logger = PerfDocLogger()
class Generator(object):
'''
Once each perfdocs directory has been validated, the generator fills the
template for each framework with the test descriptions from the config and
saves the perfdocs as an index.rst index file plus a suite_name.rst file for
each suite of tests in the framework.
'''
def __init__(self, verifier, workspace, generate=False):
'''
Initialize the Generator.
:param verifier: Verifier object. It should not be a fresh Verifier object,
but an initialized one whose validate_tree() method has already been called.
:param workspace: Path to the top-level checkout directory.
:param generate: Flag for generating the documentation
'''
self._workspace = workspace
if not self._workspace:
raise Exception("PerfDocs Generator requires a workspace directory.")
# Template documents without added information reside here
self.templates_path = os.path.join(
self._workspace, 'tools', 'lint', 'perfdocs', 'templates')
self.perfdocs_path = os.path.join(
self._workspace, 'testing', 'perfdocs', 'generated')
self._generate = generate
self._verifier = verifier
self._perfdocs_tree = self._verifier._gatherer.perfdocs_tree
def build_perfdocs_from_tree(self):
'''
Builds up a document for each framework that was found.
:return dict: A dictionary containing a mapping from each framework
to the document that was built for it, i.e.:
{
framework_name: framework_document,
...
}
'''
def _append_rst_section(title, content, documentation, type=None):
'''
Adds a section to the documentation, rendering the given title as a heading
of the given type and the given content as the paragraph beneath it.
:param title: title of the section
:param content: content of section paragraph
:param documentation: documentation object to add section to
:param type: type of the title heading
'''
heading_map = {
'H4': '-',
'H5': '^'
}
heading_symbol = heading_map.get(type, '-')
documentation.extend([title, heading_symbol * len(title), content, ''])
# Using the verified `perfdocs_tree`, build up the documentation.
frameworks_info = {}
for framework in self._perfdocs_tree:
yaml_content = read_yaml(os.path.join(framework['path'], framework['yml']))
rst_content = read_file(
os.path.join(framework['path'], framework['rst']),
stringify=True)
# Gather all tests and descriptions and format them into
# documentation content
documentation = []
suites = yaml_content['suites']
for suite_name in sorted(suites.keys()):
suite_info = suites[suite_name]
# Add the suite with an H4 heading
_append_rst_section(
suite_name.capitalize(), suite_info['description'], documentation, type="H4")
tests = suite_info.get('tests', {})
for test_name in sorted(tests.keys()):
# As we expect browsertime to become its own framework and its test names
# are not standardized, temporarily prefix these test names with 'Browsertime'
if 'Browsertime' in tests[test_name] and 'Desktop' in tests[test_name]:
_append_rst_section(
'Browsertime '+test_name,
tests[test_name],
documentation, type="H5")
elif 'Android' in tests[test_name] and 'Browsertime' in tests[test_name]:
_append_rst_section(
'Browsertime '+test_name,
tests[test_name],
documentation, type="H5")
else:
# Add the tests with an H5 heading
_append_rst_section(test_name, tests[test_name], documentation, type="H5")
# Insert documentation into `.rst` file
framework_rst = re.sub(
r'{documentation}',
os.linesep.join(documentation),
rst_content
)
frameworks_info[yaml_content['name']] = framework_rst
return frameworks_info
def _create_temp_dir(self):
'''
Create a temp directory in preparation for saving the documentation tree.
:return: str the location of perfdocs_tmpdir
'''
# Build the directory that will contain the final result (a tmp dir
# that will be moved to the final location afterwards)
try:
tmpdir = tempfile.mkdtemp()
perfdocs_tmpdir = os.path.join(tmpdir, 'generated')
os.mkdir(perfdocs_tmpdir)
except OSError as e:
logger.critical("Error creating temp directory: {}".format(e))
return False
return perfdocs_tmpdir
def _create_perfdocs(self):
'''
Creates the perfdocs documentation.
:return: str path of the temp dir it is saved in
'''
# All directories that are kept in the perfdocs tree are valid,
# so use it to build up the documentation.
framework_docs = self.build_perfdocs_from_tree()
perfdocs_tmpdir = self._create_temp_dir()
# Save the documentation files
frameworks = []
for framework_name in sorted(framework_docs.keys()):
frameworks.append(framework_name)
save_file(
framework_docs[framework_name],
os.path.join(perfdocs_tmpdir, framework_name)
)
# Get the main page and add the framework links to it
mainpage = read_file(os.path.join(self.templates_path, "index.rst"), stringify=True)
fmt_frameworks = os.linesep.join([' :doc:`%s`' % name for name in frameworks])
fmt_mainpage = re.sub(r"{test_documentation}", fmt_frameworks, mainpage)
save_file(fmt_mainpage, os.path.join(perfdocs_tmpdir, 'index'))
return perfdocs_tmpdir
def _save_perfdocs(self, perfdocs_tmpdir):
'''
Copies the generated perfdocs tree from the temp directory into the
in-tree location.
:param perfdocs_tmpdir: str location of the temp dir where the
perfdocs were saved
'''
# Remove the old docs and copy the new version there without
# checking if they need to be regenerated.
logger.log("Regenerating perfdocs...")
if os.path.exists(self.perfdocs_path):
shutil.rmtree(self.perfdocs_path)
try:
saved = shutil.copytree(perfdocs_tmpdir, self.perfdocs_path)
if saved:
logger.log("Documentation saved to {}/".format(
re.sub(".*testing", "testing", self.perfdocs_path)))
except Exception as e:
logger.critical("There was an error while saving the documentation: {}".format(e))
def generate_perfdocs(self):
'''
Generate the performance documentation.
If `self._generate` is True, then the documentation will be regenerated
without any checks. Otherwise, if it is False, the new documentation will be
prepared and compared with the existing documentation to determine whether
it should be regenerated.
:return bool: If `self._generate` is True, True means the docs were
regenerated. If `self._generate` is False, True means the docs should be
regenerated and False means they do not need to be regenerated.
'''
def get_possibly_changed_files():
'''
Returns files that might have been modified
(used to output a linter warning for regeneration)
:return: list - files that might have been modified
'''
files = []
for entry in self._perfdocs_tree:
files.extend(
[os.path.join(entry['path'], entry['yml']),
os.path.join(entry['path'], entry['rst'])]
)
return files
if not os.path.exists(self.perfdocs_path) and not self._generate:
# The generated docs don't exist and we are not generating, so throw
# a linting error and exit.
logger.warning(
"PerfDocs need to be regenerated.",
files=get_possibly_changed_files()
)
return True
perfdocs_tmpdir = self._create_perfdocs()
if self._generate:
self._save_perfdocs(perfdocs_tmpdir)
else:
# If we are not generating, then at least check if they
# should be regenerated by comparing the directories.
if not are_dirs_equal(perfdocs_tmpdir, self.perfdocs_path):
logger.warning(
"PerfDocs are outdated, run ./mach lint -l perfdocs --fix` to update them.",
files=get_possibly_changed_files()
)
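To make the heading logic in _append_rst_section concrete, here is a small self-contained sketch of the RST it builds; the helper is nested inside build_perfdocs_from_tree, so an equivalent standalone copy is used here, and the suite/test strings are simply the Raptor descriptions quoted earlier in this commit:
import os

def append_rst_section(title, content, documentation, type=None):
    # Standalone copy of the nested helper above, reproduced only for illustration.
    heading_map = {'H4': '-', 'H5': '^'}
    documentation.extend([title, heading_map.get(type, '-') * len(title), content, ''])

doc = []
append_rst_section('Desktop', 'Used for testing page-load performance.', doc, type='H4')
append_rst_section('raptor-tp6', 'Used for testing desktop page-load performance using Raptor.', doc, type='H5')
print(os.linesep.join(doc))
# Desktop
# -------
# Used for testing page-load performance.
#
# raptor-tp6
# ^^^^^^^^^^
# Used for testing desktop page-load performance using Raptor.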

View file

@@ -13,6 +13,7 @@ class PerfDocLogger(object):
'''
PATHS = []
LOGGER = None
FAILED = False
def __init__(self):
'''Initializes the PerfDocLogger.'''
@@ -30,7 +31,7 @@ class PerfDocLogger(object):
def log(self, msg):
'''
Log a message.
Log an info message.
:param str msg: Message to log.
'''
@@ -70,4 +71,13 @@ class PerfDocLogger(object):
rule="Flawless performance docs."
)
PerfDocLogger.FAILED = True
break
def critical(self, msg):
'''
Log a critical message.
:param str msg: Message to log.
'''
self.logger.critical(msg)
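A brief usage sketch of the logger additions above (hedged: PerfDocLogger wraps a mozlint logger that run_perfdocs is expected to install on the class before these calls; the message strings are the ones used by the generator):
from perfdocs.logger import PerfDocLogger

logger = PerfDocLogger()
logger.log("Regenerating perfdocs...")             # info-level message
logger.critical("Error creating temp directory")   # unrecoverable failure
if PerfDocLogger.FAILED:
    # A lint warning/error was reported earlier; run_perfdocs skips generation.
    pass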

View file

@@ -7,7 +7,7 @@ import os
import re
def run_perfdocs(config, logger=None, paths=None, verify=True, generate=False):
def run_perfdocs(config, logger=None, paths=None, generate=True):
'''
Build up performance testing documentation dynamically by combining
text data from YAML files that reside in `perfdoc` folders
@@ -28,9 +28,7 @@ def run_perfdocs(config, logger=None, paths=None, verify=True, generate=False):
suite.
Usage for verification: ./mach lint -l perfdocs
Usage for generation: Not Implemented
Currently, doc generation is not implemented - only validation.
Usage for generation: ./mach lint -l perfdocs --fix
For validation, see the Verifier class for a description of how
it works.
@@ -43,8 +41,7 @@ def run_perfdocs(config, logger=None, paths=None, verify=True, generate=False):
output the linting warnings/errors.
:param list paths: The paths that are being tested. Used to filter
out errors from files outside of these paths.
:param bool verify: If true, the verification will be performed.
:param bool generate: If true, the docs will be generated.
:param bool generate: If true, the docs will be (re)generated.
'''
from perfdocs.logger import PerfDocLogger
@@ -63,11 +60,17 @@ def run_perfdocs(config, logger=None, paths=None, verify=True, generate=False):
if not os.path.exists(testing_dir):
raise Exception("Cannot locate testing directory at %s" % testing_dir)
# Run either the verifier or generator
if generate:
raise NotImplementedError
if verify:
from perfdocs.verifier import Verifier
# Late import because logger isn't defined until later
from perfdocs.generator import Generator
from perfdocs.verifier import Verifier
verifier = Verifier(testing_dir, top_dir)
verifier.validate_tree()
# Run the verifier first
verifier = Verifier(testing_dir, top_dir)
verifier.validate_tree()
if not PerfDocLogger.FAILED:
# Even if the tree is valid, we need to check if the documentation
# needs to be regenerated, and if it does, we throw a linting error.
# `generate` dictates whether or not the documentation is generated.
generator = Generator(verifier, generate=generate, workspace=top_dir)
generator.generate_perfdocs()
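As a rough sketch of calling the entry point above directly (normally ./mach lint -l perfdocs [--fix] drives it; `config` and `logger` here are placeholders supplied by mozlint, and the paths list is an example):
run_perfdocs(config, logger=logger, paths=['testing/raptor'], generate=False)
# -> verifies the perfdocs tree and warns if testing/perfdocs/generated is stale
run_perfdocs(config, logger=logger, paths=['testing/raptor'], generate=True)
# -> verifies, then rewrites testing/perfdocs/generated (the --fix path)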

View file

@@ -0,0 +1,9 @@
###################
Performance Testing
###################
Below you can find links to the various documentation that exists for performance testing and the associated tests.
{test_documentation}
For more information please see this `wiki page <https://wiki.mozilla.org/TestEngineering/Performance>`_.

View file

@@ -3,20 +3,36 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import filecmp
import os
import yaml
from perfdocs.logger import PerfDocLogger
logger = PerfDocLogger()
def read_file(path):
'''Opens a file and returns its contents.
def save_file(file_content, path, extension="rst"):
'''
Saves data into a file.
:param str file_content: Content to write into the file.
:param str path: Location and name of the file being saved
(without an extension).
:param str extension: Extension to save the file as.
'''
with open("{}.{}".format(path, extension), 'w') as f:
f.write(file_content)
def read_file(path, stringify=False):
'''
Opens a file and returns its contents.
:param str path: Path to the file.
:param bool stringify: If True, return the contents as a single string.
:return: List of lines in the file, or a single string if stringify is True.
'''
with open(path, 'r') as f:
return f.readlines()
return f.read() if stringify else f.readlines()
def read_yaml(yaml_path):
@@ -36,3 +52,34 @@ def read_yaml(yaml_path):
)
return contents
def are_dirs_equal(dir_1, dir_2):
'''
Compare two directories to see if they are equal. Files in each
directory are assumed to be equal if their names and contents
are equal.
:param dir_1: First directory path
:param dir_2: Second directory path
:return: True if the directory trees are the same and False otherwise.
'''
dirs_cmp = filecmp.dircmp(dir_1, dir_2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return False
_, mismatch, errors = filecmp.cmpfiles(
dir_1, dir_2, dirs_cmp.common_files, shallow=False
)
if mismatch or errors:
return False
for common_dir in dirs_cmp.common_dirs:
subdir_1 = os.path.join(dir_1, common_dir)
subdir_2 = os.path.join(dir_2, common_dir)
if not are_dirs_equal(subdir_1, subdir_2):
return False
return True
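A minimal hedged example of the directory comparison used to decide whether the generated docs are stale; the file name and contents below are invented for illustration:
import os
import tempfile

from perfdocs.utils import are_dirs_equal

dir_a, dir_b = tempfile.mkdtemp(), tempfile.mkdtemp()
with open(os.path.join(dir_a, 'index.rst'), 'w') as f:
    f.write('Performance Testing\n')
with open(os.path.join(dir_b, 'index.rst'), 'w') as f:
    f.write('Performance Testing (outdated)\n')

print(are_dirs_equal(dir_a, dir_a))  # True: identical trees
print(are_dirs_equal(dir_a, dir_b))  # False: same file names, different contents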

View file

@@ -117,25 +117,25 @@ class Verifier(object):
# Suite found - now check if any tests in YAML
# definitions don't exist
ytests = ytests['tests']
for mnf_pth in ytests:
for suite_name in ytests:
foundtest = False
for t in framework_info["test_list"][suite]:
tb = os.path.basename(t)
tb = re.sub("\..*", "", tb)
if mnf_pth == tb:
# Found an exact match for the mnf_pth
if suite_name == tb:
# Found an exact match for the suite_name
foundtest = True
break
if mnf_pth in tb:
# Found a 'fuzzy' match for the mnf_pth
if suite_name in tb:
# Found a 'fuzzy' match for the suite_name
# i.e. 'wasm' could exist for all raptor wasm tests
global_descriptions[suite].append(mnf_pth)
global_descriptions[suite].append(suite_name)
foundtest = True
break
if not foundtest:
logger.warning(
"Could not find an existing test for {} - bad test name?".format(
mnf_pth
suite_name
),
framework_info["yml_path"]
)
@@ -146,7 +146,7 @@ class Verifier(object):
)
# Check for any missing tests/suites
for suite, manifest_paths in framework_info["test_list"].items():
for suite, test_list in framework_info["test_list"].items():
if not yaml_content["suites"].get(suite):
# Description doesn't exist for the suite
logger.warning(
@@ -165,14 +165,14 @@ class Verifier(object):
tests_found = 0
missing_tests = []
test_to_manifest = {}
for mnf_pth in manifest_paths:
tb = os.path.basename(mnf_pth)
for test_name in test_list:
tb = os.path.basename(test_name)
tb = re.sub("\..*", "", tb)
if stests.get(tb) or stests.get(mnf_pth):
if stests.get(tb) or stests.get(test_name):
# Test description exists, continue with the next test
tests_found += 1
continue
test_to_manifest[tb] = mnf_pth
test_to_manifest[tb] = test_name
missing_tests.append(tb)
# Check if global test descriptions exist (i.e.
@@ -180,8 +180,8 @@ class Verifier(object):
new_mtests = []
for mt in missing_tests:
found = False
for mnf_pth in global_descriptions[suite]:
if mnf_pth in mt:
for test_name in global_descriptions[suite]:
if test_name in mt:
# Global test exists for this missing test
found = True
break
@@ -191,10 +191,10 @@ class Verifier(object):
if len(new_mtests):
# Output an error for each manifest with a missing
# test description
for mnf_pth in new_mtests:
for test_name in new_mtests:
logger.warning(
"Could not find a test description for {}".format(mnf_pth),
test_to_manifest[mnf_pth]
"Could not find a test description for {}".format(test_name),
test_to_manifest[test_name]
)
continue
@@ -295,9 +295,11 @@ class Verifier(object):
"rst": self.validate_rst_content(matched_rst)
}
# Log independently the errors found for the matched files
for file_format, valid in _valid_files.items():
if not valid:
logger.log("File validation error: {}".format(file_format))
if not all(_valid_files.values()):
# Don't check the descriptions if the YAML or RST is bad
logger.log("Bad perfdocs directory found in {}".format(matched['path']))
continue
found_good += 1
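Finally, a hedged illustration of the exact vs. 'fuzzy' name matching performed above; the YAML entry and test name are hypothetical examples:
import os
import re

yaml_entry = 'wasm'                # key under a suite's tests: section in the framework YAML
test = 'raptor-wasm-misc-firefox'  # entry from framework_info['test_list'][suite]
tb = re.sub(r'\..*', '', os.path.basename(test))  # strips any path/extension, as in the loop above

print(yaml_entry == tb)  # False - not an exact match for this particular test
print(yaml_entry in tb)  # True  - fuzzy match: one 'wasm' description covers all wasm tests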