new: added run_id to reports and to the ctx object (#40)

Natalia Maximo 2021-07-13 15:54:21 -04:00 committed by GitHub
Parent c21f934dde
Commit 13292d241e
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 153 additions and 44 deletions
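The change threads a single identifier through the whole stack: unless one is supplied with the new `-i`/`--run-id` flag, a random UUID4 string is generated, stored on the `Context` object, and echoed in every `ReportSummary`. A minimal sketch of that fallback logic, assuming only the standard library:

```python
import uuid
from typing import Optional

# Sketch of the run-ID fallback this commit adds to Context (see the
# Context hunk further down): an explicit ID wins, otherwise a fresh
# random UUID4 is generated.
def resolve_run_id(run_id: Optional[str] = None) -> str:
    if run_id is None:
        return str(uuid.uuid4())
    return run_id
```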

View file

@@ -1,3 +1,5 @@
import uuid
import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
@@ -37,4 +39,7 @@ def pytest_load_initial_conftests(early_config: Config, parser: Parser):
def pytest_collect_file(parent: pytest.Session, path):
return collect_file(parent, path, parent.config.getini('quilla-prefix'))
return collect_file(parent, path, parent.config.getini('quilla-prefix'), run_id)
run_id = str(uuid.uuid4())

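Because `run_id` is assigned at module scope, it is generated exactly once when the plugin module is imported, so every file collected in a single pytest session shares the same ID. A small sketch of that property (the names here are illustrative, not the plugin's):

```python
import uuid

# Generated once at import time; every caller in this interpreter
# session observes the same value.
run_id = str(uuid.uuid4())

def tag(path: str) -> str:
    return f'{path} -> run {run_id}'

print(tag('sample-a.json'))
print(tag('sample-b.json'))  # same run ID as the line above
```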
View file

@@ -10,7 +10,7 @@ from quilla import (
from quilla.reports.report_summary import ReportSummary
def collect_file(parent: pytest.Session, path: LocalPath, prefix: str):
def collect_file(parent: pytest.Session, path: LocalPath, prefix: str, run_id: str):
'''
Collects files if their path ends with .json and starts with the prefix
@@ -18,6 +18,7 @@ def collect_file(parent: pytest.Session, path: LocalPath, prefix: str):
parent: The session object performing the collection
path: The path to the file that might be collected
prefix: The prefix for files that should be collected
run_id: The run ID of the quilla tests
Returns:
A quilla file object if the path matches, None otherwise
@@ -26,10 +27,14 @@ def collect_file(parent: pytest.Session, path: LocalPath, prefix: str):
# TODO: change "path" to be "fspath" when pytest 6.3 is released:
# https://docs.pytest.org/en/latest/_modules/_pytest/hookspec.html#pytest_collect_file
if path.ext == '.json' and path.basename.startswith(prefix):
return QuillaFile.from_parent(parent, fspath=path)
return QuillaFile.from_parent(parent, fspath=path, run_id=run_id)
class QuillaFile(pytest.File):
def __init__(self, *args, run_id: str = '', **kwargs) -> None:
super().__init__(*args, **kwargs)
self.quilla_run_id = run_id
def collect(self):
'''
Loads the JSON test data from the path and creates the test instance
@@ -38,13 +43,19 @@ class QuillaFile(pytest.File):
A quilla item configured from the JSON data
'''
test_data = self.fspath.open().read()
yield QuillaItem.from_parent(self, name=self.fspath.purebasename, test_data=test_data)
yield QuillaItem.from_parent(
self,
name=self.fspath.purebasename,
test_data=test_data,
run_id=self.quilla_run_id
)
class QuillaItem(pytest.Item):
def __init__(self, name: str, parent: QuillaFile, test_data: str):
def __init__(self, name: str, parent: QuillaFile, test_data: str, run_id: str):
super(QuillaItem, self).__init__(name, parent)
self.test_data = test_data
self.quilla_run_id = run_id
json_data = json.loads(test_data)
markers = json_data.get('markers', [])
for marker in markers:
@@ -59,6 +70,12 @@ class QuillaItem(pytest.Item):
[*self.config.getoption('--quilla-opts').split(), ''],
str(self.config.rootpath)
)
if not (
'-i' in self.config.getoption('--quilla-opts') or
'--run-id' in self.config.getoption('--quilla-opts')
):
ctx.run_id = self.quilla_run_id
ctx.json = self.test_data
results = execute(ctx)
self.results = results

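The ID travels from the collection hook down to each test item through pytest's `from_parent()` constructor, which forwards unrecognised keyword arguments to `__init__` (available since pytest 5.4). Note also the guard in `runtest`: the item only injects its ID when the user has not already pinned one via `--quilla-opts`. A condensed sketch of the forwarding pattern, with a hypothetical class name:

```python
import pytest

# SketchFile is hypothetical, but from_parent() really does pass extra
# keyword arguments through to __init__, which is how run_id reaches
# QuillaFile and QuillaItem above.
class SketchFile(pytest.File):
    def __init__(self, *args, run_id: str = '', **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.quilla_run_id = run_id

    def collect(self):
        return iter(())

# Inside a pytest_collect_file hook:
#   SketchFile.from_parent(parent, fspath=path, run_id=run_id)
```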
View file

@@ -30,6 +30,7 @@ def make_parser() -> argparse.ArgumentParser: # pragma: no cover
'''
parser = argparse.ArgumentParser(
prog='quilla',
usage='%(prog)s [options] [-f] JSON',
description='''
Program to provide a report of UI validations given a json representation
of the validations or given the filename containing a json document describing
@@ -37,6 +38,12 @@ def make_parser() -> argparse.ArgumentParser: # pragma: no cover
''',
)
parser.add_argument(
'--version',
action='store_true',
help='Prints the version of the software and quits'
)
parser.add_argument(
'-f',
'--file',
@@ -48,25 +55,32 @@
'json',
help='The json file name or raw json string',
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug mode',
config_group = parser.add_argument_group(title='Configuration options')
config_group.add_argument(
'-i',
'--run-id',
action='store',
metavar='run_id',
default=None,
help='A run ID for quilla, if manually passed in.'
'Used to set many quilla tests to have the same run ID'
)
parser.add_argument(
config_group.add_argument(
'-d',
'--definitions',
action='append',
metavar='file',
help='A file with definitions for the \'Definitions\' context object'
)
config_group.add_argument(
'--driver-dir',
dest='drivers_path',
action='store',
default='.',
help='The directory where browser drivers are stored',
)
parser.add_argument(
'-P',
'--pretty',
action='store_true',
help='Set this flag to have the output be pretty-printed'
)
parser.add_argument(
config_group.add_argument(
'--no-sandbox',
dest='no_sandbox',
action='store_true',
@@ -75,14 +89,28 @@ def make_parser() -> argparse.ArgumentParser: # pragma: no cover
Useful for running in docker containers'
'''
)
parser.add_argument(
'-d',
'--definitions',
action='append',
metavar='file',
help='A file with definitions for the \'Definitions\' context object'
output_group = parser.add_argument_group(title='Output Options')
output_group.add_argument(
'-P',
'--pretty',
action='store_true',
help='Set this flag to have the output be pretty-printed'
)
parser.add_argument(
output_group.add_argument(
'--indent',
type=int,
default=4,
help='How much space each indent level should have when pretty-printing the report'
)
debug_group = parser.add_argument_group(title='Debug Options')
debug_group.add_argument(
'--debug',
action='store_true',
help='Enable debug mode',
)
debug_group.add_argument(
'-v',
'--verbose',
action='count',
@@ -90,11 +118,6 @@ def make_parser() -> argparse.ArgumentParser: # pragma: no cover
'Log outputs are directed to stderr by default.',
default=0
)
parser.add_argument(
'--version',
action='store_true',
help='Prints the version of the software and quits'
)
return parser
@@ -257,6 +280,8 @@ def setup_context(args: List[str], plugin_root: str = '.') -> Context:
parsed_args.no_sandbox,
parsed_args.definitions,
logger=logger,
run_id=parsed_args.run_id,
indent=parsed_args.indent,
)
logger.info('Running "quilla_configure" hook')
@@ -279,8 +304,6 @@ def run():
ctx.logger.debug('Finished generating reports')
out = reports.to_dict()
if ctx._context_data['Outputs']:
out['Outputs'] = ctx._context_data['Outputs']
if ctx.pretty:
print(json.dumps(

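Beyond the new flags, the parser rework sorts everything into 'Configuration options', 'Output Options', and 'Debug Options'. Argument groups in `argparse` only change how `--help` is rendered; parsing and the resulting namespace are untouched. A runnable sketch:

```python
import argparse

# Groups organise the help text; parsed values still land on the same
# flat namespace.
parser = argparse.ArgumentParser(prog='quilla')
config_group = parser.add_argument_group(title='Configuration options')
config_group.add_argument(
    '-i', '--run-id',
    action='store',
    default=None,
    help='A run ID for quilla, if manually passed in',
)

args = parser.parse_args(['--run-id', 'abc123'])
print(args.run_id)  # abc123
```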
View file

@@ -14,6 +14,7 @@ from logging import (
NullHandler,
)
import json
import uuid
from pluggy import PluginManager
import pydeepmerge as pdm
@@ -39,6 +40,7 @@ class Context(DriverHolder):
is_file: Whether a file was originally passed in or if the raw json was passed in
no_sandbox: Whether to pass the '--no-sandbox' arg to Chrome and Edge
logger: An optional configured logger instance.
run_id: A string that uniquely identifies the run of Quilla.
Attributes:
@@ -54,6 +56,8 @@
no_sandbox: Whether to pass the '--no-sandbox' arg to Chrome and Edge
logger: A logger instance. If None was passed in for the 'logger' argument, will create
one with the default logger.
run_id: A string that uniquely identifies the run of Quilla.
pretty_print_indent: How many spaces to use for indentation when pretty-printing the output
'''
default_context: Optional['Context'] = None
_drivers_path: str
@@ -65,7 +69,6 @@
r'([a-zA-Z][a-zA-Z0-9_]+)(\.[a-zA-Z_][a-zA-Z0-9_]+)+'
)
_output_browser: str = 'Firefox'
pretty_print_indent: int = 4
def __init__(
self,
@@ -77,7 +80,9 @@
is_file: bool = False,
no_sandbox: bool = False,
definitions: List[str] = [],
logger: Optional[Logger] = None
logger: Optional[Logger] = None,
run_id: Optional[str] = None,
indent: int = 4,
):
super().__init__()
self.pm = plugin_manager
@@ -87,6 +92,7 @@
self.json = json_data
self.is_file = is_file
self.no_sandbox = no_sandbox
self.pretty_print_indent = indent
path = Path(drivers_path)
if logger is None:
@@ -95,10 +101,22 @@
else:
self.logger = logger
if run_id is None:
self.run_id = str(uuid.uuid4()) # Generate a random UUID
else:
self.run_id = run_id
self.drivers_path = str(path.resolve())
self._context_data: Dict[str, dict] = {'Validation': {}, 'Outputs': {}, 'Definitions': {}}
self._load_definition_files(definitions)
@property
def outputs(self) -> dict:
'''
A dictionary of all outputs created by the steps for the current Quilla test
'''
return self._context_data['Outputs']
@property
def is_debug(self) -> bool:
'''
@@ -329,7 +347,9 @@ def get_default_context(
no_sandbox: bool = False,
definitions: List[str] = [],
recreate_context: bool = False,
logger: Optional[Logger] = None
logger: Optional[Logger] = None,
run_id: Optional[str] = None,
indent: int = 4,
) -> Context:
'''
Gets the default context, creating a new one if necessary.
@@ -350,6 +370,7 @@
recreate_context: Whether a new context object should be created or not
logger: An optional logger instance. If None, one will be created
with the NullHandler.
run_id: A string that uniquely identifies the run of Quilla.
Returns
Application context shared for the entire application
@@ -368,5 +389,7 @@
no_sandbox,
definitions,
logger,
run_id,
indent,
)
return Context.default_context

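Condensing the `Context` changes: the constructor gains `run_id` and `indent` parameters, falls back to a fresh UUID4 when no ID is given, and exposes the 'Outputs' bucket through a read-only property. A stripped-down sketch (the real class carries far more state):

```python
import uuid
from typing import Dict, Optional

class ContextSketch:
    def __init__(self, run_id: Optional[str] = None, indent: int = 4):
        # Keep an explicit run ID; otherwise generate a random UUID
        self.run_id = run_id if run_id is not None else str(uuid.uuid4())
        self.pretty_print_indent = indent
        self._context_data: Dict[str, dict] = {
            'Validation': {}, 'Outputs': {}, 'Definitions': {},
        }

    @property
    def outputs(self) -> dict:
        '''All outputs created by the steps for the current Quilla test'''
        return self._context_data['Outputs']

ctx = ContextSketch()
print(len(ctx.run_id))  # 36, the canonical UUID string length
```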
View file

@@ -1,6 +1,5 @@
import json
from abc import (
abstractclassmethod,
abstractmethod,
)
from typing import Dict
@@ -29,7 +28,8 @@ class BaseReport(EnumResolver):
self.msg: str = msg
self.report_type: ReportType = report_type
@abstractclassmethod
@classmethod
@abstractmethod
def from_dict(cls, report: Dict[str, Dict[str, str]]) -> 'BaseReport':
'''
Converts a dictionary report into a valid Report object

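The base-report hunk is a small modernisation riding along with the feature: `abc.abstractclassmethod` has been deprecated since Python 3.3, and the documented replacement is stacking `@classmethod` on top of `@abstractmethod`. A sketch showing the enforcement still works:

```python
from abc import ABC, abstractmethod

class BaseSketch(ABC):
    @classmethod
    @abstractmethod
    def from_dict(cls, report: dict) -> 'BaseSketch':
        '''Subclasses must provide a dict-based constructor'''

class ConcreteSketch(BaseSketch):
    @classmethod
    def from_dict(cls, report: dict) -> 'ConcreteSketch':
        return cls()

print(ConcreteSketch.from_dict({}))  # works; BaseSketch() raises TypeError
```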
View file

@@ -10,6 +10,7 @@ from quilla.common.enums import ReportType
from quilla.reports.base_report import BaseReport
from quilla.reports.validation_report import ValidationReport
from quilla.reports.step_failure_report import StepFailureReport
from quilla.reports.visual_parity_report import VisualParityReport
class ReportSummary:
@@ -17,9 +18,13 @@
A class to describe a series of report objects, as well as manipulating them for test purposes.
Args:
run_id: A string that uniquely identifies the run
outputs: The outputs generated by various steps
reports: A list of reports to produce a summary of
Attributes:
run_id: A string that uniquely identifies the run
outputs: The outputs generated by various steps
reports: A list of reports used to produce a summary
successes: The number of reports that are described as successful
fails: The number of reports that are not described as successful
@@ -30,9 +35,12 @@
selector: Dict[str, Type[BaseReport]] = {
'validationReport': ValidationReport,
'stepFailureReport': StepFailureReport,
'visualParityReport': VisualParityReport,
}
def __init__(self, reports: List[BaseReport] = []):
def __init__(self, run_id: str, outputs: dict, reports: List[BaseReport] = []):
self.run_id = run_id
self.outputs = outputs
self.reports = reports
self.successes = 0
self.fails = 0
@@ -54,7 +62,9 @@
'reports': [
report.to_dict() for report in self.reports
]
}
},
'outputs': self.outputs,
'run_id': self.run_id,
}
def to_json(self) -> str:
@@ -65,12 +75,14 @@
return json.dumps(self.to_dict())
@classmethod
def from_dict(cls, summary_dict):
def from_dict(cls, summary_dict: dict):
'''
Loads a ReportSummary object that is represented as a dictionary. It does not trust the
metadata that is in the report, and will regenerate the metadata itself.
'''
reports = summary_dict['reportSummary']['reports']
run_id = summary_dict.get('run_id', '')
outputs = summary_dict.get('outputs', {})
obj_reports = []
for report in reports:
# Each report has a report tag as the root of the json document
@@ -78,7 +90,7 @@
report_object = cls.selector[report_type]
obj_reports.append(report_object.from_dict(report))
obj_reports = [ValidationReport.from_dict(report) for report in reports]
return ReportSummary(obj_reports)
return ReportSummary(run_id, outputs, obj_reports)
@classmethod
def from_json(cls, summary_json):
@@ -110,6 +122,7 @@
'''
def __init__(self, summary: 'ReportSummary'):
self._summary = summary
self._run_id = summary.run_id
def _filter(self, condition: Callable[[BaseReport], bool]) -> 'ReportSummary':
'''
@@ -119,7 +132,7 @@
reports = self._summary.reports.copy()
filtered_reports = filter(condition, reports)
return ReportSummary(list(filtered_reports))
return ReportSummary(self._run_id, {}, list(filtered_reports))
def state(self, state: str) -> 'ReportSummary':
'''

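After this change the serialized summary carries `run_id` and `outputs` as top-level keys alongside `reportSummary`, and `from_dict()` reads them with `.get()` defaults so summaries produced before this commit still load. A sketch of the new shape, with hypothetical values:

```python
# Hypothetical serialized summary illustrating the new top-level keys;
# the per-report payloads are elided.
summary_dict = {
    'reportSummary': {
        'reports': [],
    },
    'outputs': {},
    'run_id': '00000000-0000-4000-8000-000000000000',
}

# from_dict() tolerates summaries that predate these keys:
run_id = summary_dict.get('run_id', '')
outputs = summary_dict.get('outputs', {})
print(run_id, outputs)
```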
View file

@@ -1,4 +1,7 @@
from typing import (
Dict,
cast
)
from quilla.common.enums import (
XPathValidationStates,
ValidationTypes
@@ -75,3 +78,24 @@ class VisualParityReport(ValidationReport):
return {
'visualParityReport': report_data
}
@classmethod
def from_dict(cls, report) -> 'VisualParityReport':
params: Dict[str, str] = report['visualParityReport']
msg = params.get('msg', '')
baseline_id = params['baselineId']
baseline_uri = params.get('baselineImageUri', '')
treatment_uri = params.get('treatmentImageUri', '')
delta_uri = params.get('deltaImageUri', '')
success = cast(bool, params['passed'])
return VisualParityReport(
target=params['target'],
browser_name=params['targetBrowser'],
success=success,
msg=msg,
baseline_id=baseline_id,
baseline_image_uri=baseline_uri,
treatment_image_uri=treatment_uri,
delta_image_uri=delta_uri,
)

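One detail worth flagging in the new `from_dict()`: `typing.cast` is a hint for static type checkers and performs no conversion at runtime, so `params['passed']` must already be a genuine boolean in the parsed JSON. A two-line demonstration:

```python
from typing import cast

params = {'passed': True}
success = cast(bool, params['passed'])  # no runtime conversion happens here
print(success is True)  # True: cast() returned the value unchanged
```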
View file

@@ -160,4 +160,4 @@ class QuillaTest(EnumResolver):
for browser in self.browsers:
validation_reports.extend(browser.validate())
return ReportSummary(validation_reports)
return ReportSummary(self.ctx.run_id, self.ctx.outputs, validation_reports)

View file

@@ -3,6 +3,7 @@ from logging import (
Logger,
NullHandler,
)
import uuid
import pytest
from _pytest.config import PytestPluginManager
@ -43,6 +44,9 @@ def driver():
return mock_driver
run_id = str(uuid.uuid4())
def pytest_addoption(parser, pluginmanager: PytestPluginManager):
pluginmanager.set_blocked('quilla')
parser.addoption(
@@ -54,4 +58,4 @@ def pytest_addoption(parser, pluginmanager: PytestPluginManager):
def pytest_collect_file(parent, path):
return collect_file(parent, path, 'test')
return collect_file(parent, path, 'test', run_id)
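The test conftest mirrors the plugin: it blocks the installed `quilla` entry point with pluggy's `set_blocked()` so its own hooks take over, and defines its own module-level run ID. A self-contained sketch of the arrangement, with the real `collect_file()` helper replaced by a stub:

```python
import uuid

# One ID for the whole test session, as in the conftest above.
run_id = str(uuid.uuid4())

def pytest_addoption(parser, pluginmanager):
    # Keep the installed quilla plugin from registering duplicate hooks
    pluginmanager.set_blocked('quilla')

def pytest_collect_file(parent, path):
    # Stub standing in for the collect_file() helper shown earlier
    if path.ext == '.json' and path.basename.startswith('test'):
        print(f'would collect {path} under run {run_id}')
    return None
```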