new: added test pipeline workflow file (#9)
This commit is contained in:
Родитель
b0f229181f
Коммит
d32ae79e09
|
@ -0,0 +1,52 @@
|
|||
# CI pipeline: lint, type-check, and run the test suite for Quilla across
# the supported Python versions, publishing the JUnit XML as an artifact.
name: Test pipeline

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: ['3.8', '3.9']
    name: Testing quilla with python ${{ matrix.python }}
    steps:
      - uses: actions/checkout@v2

      - name: Setup python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}

      - name: Install firefox
        run: sudo apt install firefox

      - name: Get geckodriver
        run: ./scripts/get_geckodriver.sh

      - name: Install wheel
        run: pip install wheel

      - name: Install all dependencies
        run: pip install -e .[all]

      - name: Run linter
        run: flake8

      - name: Run static type checker
        run: mypy

      - name: Set filename for xml output
        # Default variables such as the run number are exposed through the
        # 'github' context; '${{ env.GITHUB_RUN_NUMBER }}' evaluates to an
        # empty string because default env vars are not in the 'env' context.
        run: echo "JUNIT_XML_OUT=quilla-pytest-junit-RUN${{ github.run_number }}-$(date +'%Y-%m-%d').xml" >> $GITHUB_ENV

      - name: Run tests
        run: pytest --junit-xml="${{ env.JUNIT_XML_OUT }}"

      - name: Upload JUnit XML artifact
        if: ${{ always() }}  # upload results even when a previous step failed
        uses: actions/upload-artifact@v2
        with:
          name: junit_xml
          path: ${{ env.JUNIT_XML_OUT }}
|
|
@ -0,0 +1,16 @@
|
|||
#!/bin/bash
# Download and unpack the latest stable geckodriver release.
#
# Queries the GitHub releases API, resolves the platform tarball URL with
# scripts/process_release.py, downloads it, and extracts it into the
# current directory.

# Fail fast: abort on any command failure, unset variable, or pipe failure.
# The original script would happily continue (and run `wget ""`) after a
# failed curl or a non-zero exit from the helper script.
set -euo pipefail

DRIVER_RELEASE_API="https://api.github.com/repos/mozilla/geckodriver/releases"
JSON_FILE="release_data.json"
PLATFORM="linux64"

# -f makes curl exit non-zero on HTTP errors so we never parse an error page
curl -f "$DRIVER_RELEASE_API" > "$JSON_FILE"

ASSET_URL=$(python3 scripts/process_release.py "$JSON_FILE" "$PLATFORM")

echo "$ASSET_URL"
wget "$ASSET_URL"
rm "$JSON_FILE"

# Unpack every downloaded tarball, then remove the archives.
# Plain -f is enough here: these are regular files, not directories.
find . -name "*.tar.gz" -exec tar -xzf {} \;
rm -f ./*.tar.gz
|
|
@ -0,0 +1,29 @@
|
|||
'''
Resolve the download URL for the latest stable geckodriver release.

Usage: python3 process_release.py <release_json_file> <platform>

Reads the GitHub releases API payload from ``release_json_file`` and prints
the ``browser_download_url`` of the ``<platform>.tar.gz`` asset from the
newest non-draft, non-prerelease release, exiting 0. Exits 1 (after printing
an empty line in the no-release case, matching the original behaviour) when
no suitable release or asset exists.
'''
import json
import re
import sys
from typing import Optional


def latest_stable_release(releases: list) -> Optional[dict]:
    '''Return the first release that is neither a draft nor a prerelease.

    The GitHub API lists releases newest-first, so the first match is the
    latest stable release. Returns None when there is no such release.
    '''
    for release in releases:
        if not release['draft'] and not release['prerelease']:
            return release
    return None


def platform_asset_url(release: dict, platform: str) -> Optional[str]:
    '''Return the download URL of the ``<platform>.tar.gz`` asset, or None.'''
    # re.escape + escaped dots: the original pattern f"{platform}.tar.gz$"
    # left the dots as regex wildcards, so e.g. "linux64Xtar.gz" would match.
    asset_regex = re.compile(re.escape(platform) + r'\.tar\.gz$')
    for asset in release['assets']:
        if asset_regex.search(asset['name']):
            return asset['browser_download_url']
    return None


def main() -> int:
    '''Script entry point; returns the process exit code.'''
    release_json = sys.argv[1]
    platform = sys.argv[2]

    with open(release_json) as f:
        release_data = json.load(f)

    release = latest_stable_release(release_data)
    if release is None:
        print("")  # keep stdout identical to the original script
        return 1

    url = platform_asset_url(release, platform)
    if url is None:
        return 1

    print(url)
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
|
@ -6,6 +6,7 @@ max-complexity = 10
|
|||
|
||||
[mypy]
|
||||
files = src/**/*.py
|
||||
ignore_missing_imports = True
|
||||
|
||||
[tool:pytest]
|
||||
markers =
|
||||
|
@ -20,5 +21,5 @@ markers =
|
|||
quilla: Marks tests written to be executed with Quilla
|
||||
integration: Marks an integration test.
|
||||
testpaths = tests
|
||||
addopts = --cov=src --cov-report term-missing -p no:quilla
|
||||
addopts = --cov=src --cov-report term-missing -p no:quilla -n auto
|
||||
python_classes = *Tests
|
||||
|
|
13
setup.py
13
setup.py
|
@ -1,5 +1,4 @@
|
|||
from setuptools import setup, find_packages
|
||||
from itertools import chain
|
||||
|
||||
|
||||
with open('VERSION') as f:
|
||||
|
@ -28,7 +27,8 @@ extra_dependencies = {
|
|||
'pytest'
|
||||
],
|
||||
'dev': [
|
||||
'pre-commit'
|
||||
'pre-commit',
|
||||
'types-setuptools', # Adds typing stubs
|
||||
],
|
||||
'release': [
|
||||
'wheel',
|
||||
|
@ -37,10 +37,13 @@ extra_dependencies = {
|
|||
]
|
||||
}
|
||||
|
||||
extra_dependencies['all'] = list(
|
||||
chain(dependencies for _, dependencies in extra_dependencies.items())
|
||||
)
|
||||
all_dependencies = []
|
||||
|
||||
for _, dependencies in extra_dependencies.items():
|
||||
all_dependencies.extend(dependencies)
|
||||
|
||||
all_dependencies = list(set(all_dependencies)) # Convert to set to remove overlaps
|
||||
extra_dependencies['all'] = all_dependencies
|
||||
|
||||
setup(
|
||||
name='quilla',
|
||||
|
|
|
@ -5,7 +5,6 @@ from _pytest.config.argparsing import Parser
|
|||
from pytest_quilla.pytest_classes import collect_file
|
||||
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser):
|
||||
'''
|
||||
Adds quilla INI option for enabling
|
||||
|
@ -36,5 +35,6 @@ def pytest_load_initial_conftests(early_config: Config, parser: Parser):
|
|||
help="Options to be passed through to the quilla runtime for the scenario tests"
|
||||
)
|
||||
|
||||
|
||||
def pytest_collect_file(parent: pytest.Session, path):
|
||||
return collect_file(parent, path, parent.config.getini('quilla-prefix'))
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from py._path.local import LocalPath
|
||||
|
@ -55,7 +54,10 @@ class QuillaItem(pytest.Item):
|
|||
Runs the quilla test by creating an isolated context and executing the test
|
||||
data retrieved from the JSON file.
|
||||
'''
|
||||
ctx = setup_context([*self.config.getoption('--quilla-opts'), ''], str(self.config.rootpath))
|
||||
ctx = setup_context(
|
||||
[*self.config.getoption('--quilla-opts'), ''],
|
||||
str(self.config.rootpath)
|
||||
)
|
||||
results = UIValidation.from_dict(ctx, self.test_data).validate_all()
|
||||
self.results = results
|
||||
try:
|
||||
|
|
|
@ -5,7 +5,9 @@ the runtime context for the application, then executing the rest of the applicat
|
|||
import argparse
|
||||
import sys
|
||||
import json
|
||||
from typing import List
|
||||
from typing import (
|
||||
List,
|
||||
)
|
||||
|
||||
from quilla.ui_validation import UIValidation
|
||||
from quilla.ctx import (
|
||||
|
@ -65,7 +67,10 @@ def make_parser() -> argparse.ArgumentParser: # pragma: no cover
|
|||
'--no-sandbox',
|
||||
dest='no_sandbox',
|
||||
action='store_true',
|
||||
help='Adds \'--no-sandbox\' to the Chrome and Edge browsers. Useful for running in docker containers'
|
||||
help='''
|
||||
Adds \'--no-sandbox\' to the Chrome and Edge browsers.
|
||||
Useful for running in docker containers'
|
||||
'''
|
||||
)
|
||||
parser.add_argument(
|
||||
'-d',
|
||||
|
@ -131,32 +136,33 @@ def setup_context(args: List[str], plugin_root: str = '.') -> Context:
|
|||
parser = make_parser()
|
||||
pm.hook.quilla_addopts(parser=parser) # type: ignore
|
||||
|
||||
args = parser.parse_args(args)
|
||||
parsed_args = parser.parse_args(args)
|
||||
|
||||
# Set to empty list since argparse defaults to None
|
||||
if not args.definitions:
|
||||
args.definitions = []
|
||||
if not parsed_args.definitions:
|
||||
parsed_args.definitions = []
|
||||
|
||||
if not args.is_file:
|
||||
json_data = args.json
|
||||
if not parsed_args.is_file:
|
||||
json_data = parsed_args.json
|
||||
else:
|
||||
with open(args.json) as f:
|
||||
with open(parsed_args.json) as f:
|
||||
json_data = f.read()
|
||||
ctx = get_default_context(
|
||||
pm,
|
||||
args.debug,
|
||||
args.drivers_path,
|
||||
args.pretty,
|
||||
parsed_args.debug,
|
||||
parsed_args.drivers_path,
|
||||
parsed_args.pretty,
|
||||
json_data,
|
||||
args.is_file,
|
||||
args.no_sandbox,
|
||||
args.definitions,
|
||||
parsed_args.is_file,
|
||||
parsed_args.no_sandbox,
|
||||
parsed_args.definitions,
|
||||
)
|
||||
|
||||
pm.hook.quilla_configure(ctx=ctx, args=args)
|
||||
|
||||
return ctx
|
||||
|
||||
|
||||
def run():
|
||||
'''
|
||||
Creates the parser object, parses the command-line arguments, and runs them, finishing with the
|
||||
|
|
|
@ -56,8 +56,10 @@ class EnumResolver:
|
|||
Utility class to define shared behaviour for classes that need to
|
||||
resolve string values into appropriate enums
|
||||
'''
|
||||
|
||||
# ctx type omitted due to circular import
|
||||
@classmethod
|
||||
def _name_to_enum(cls, name: str, enum: Type[T], ctx = None) -> T: # ctx type omitted due to circular import
|
||||
def _name_to_enum(cls, name: str, enum: Type[T], ctx=None) -> T:
|
||||
'''
|
||||
Converts a string value into the appropriate enum type.
|
||||
Useful for inner representations of the data so we're not just working with strings
|
||||
|
|
|
@ -4,6 +4,8 @@ from functools import lru_cache
|
|||
from typing import (
|
||||
Optional,
|
||||
List,
|
||||
Dict,
|
||||
cast
|
||||
)
|
||||
from pathlib import Path
|
||||
import json
|
||||
|
@ -74,7 +76,7 @@ class Context(DriverHolder):
|
|||
path = Path(drivers_path)
|
||||
|
||||
self.drivers_path = str(path.resolve())
|
||||
self._context_data = {'Validation': {}, 'Outputs': {}, 'Definitions': {}}
|
||||
self._context_data: Dict[str, dict] = {'Validation': {}, 'Outputs': {}, 'Definitions': {}}
|
||||
self._load_definition_files(definitions)
|
||||
|
||||
@property
|
||||
|
@ -167,10 +169,15 @@ class Context(DriverHolder):
|
|||
data = self._context_data[root]
|
||||
data = self._walk_data_tree(data, path, object_expression)
|
||||
|
||||
repl_value = data
|
||||
repl_value = cast(str, data)
|
||||
elif self.pm is not None:
|
||||
# Pass it to the defined hooks
|
||||
hook_results = self.pm.hook.quilla_context_obj(ctx=self, root=root, path=tuple(path)) # type: ignore
|
||||
hook_results = self.pm.hook.quilla_context_obj(
|
||||
ctx=self,
|
||||
root=root,
|
||||
path=tuple(path)
|
||||
) # type: ignore
|
||||
|
||||
# Hook results will always be either size 1 or 0
|
||||
if len(hook_results) == 0:
|
||||
repl_value = ''
|
||||
|
@ -242,7 +249,7 @@ class Context(DriverHolder):
|
|||
for definition_file in definition_files:
|
||||
with open(definition_file) as fp:
|
||||
data_dict = json.load(fp)
|
||||
self._load_definitions(data_dict)
|
||||
self.load_definitions(data_dict)
|
||||
|
||||
def load_definitions(self, definitions_dict: dict):
|
||||
'''
|
||||
|
@ -271,7 +278,7 @@ def get_default_context(
|
|||
no_sandbox: bool = False,
|
||||
definitions: List[str] = [],
|
||||
recreate_context: bool = False,
|
||||
) -> Context:
|
||||
) -> Context:
|
||||
'''
|
||||
Gets the default context, creating a new one if necessary.
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ from quilla.ui_validation import UIValidation
|
|||
|
||||
hookspec = pluggy.HookspecMarker('quilla')
|
||||
|
||||
StepFactorySelector = Dict[UITestActions, Type[BaseStepFactory]]
|
||||
StepFactorySelector = Dict[UITestActions, Type[BaseStepFactory]]
|
||||
T = TypeVar('T', bound=Enum)
|
||||
|
||||
|
||||
|
@ -112,6 +112,7 @@ def quilla_step_factory_selector(selector: StepFactorySelector):
|
|||
selector: The factory selector dictionary.
|
||||
'''
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def quilla_resolve_enum_from_name(name: str, enum: Type[T]) -> Optional[T]:
|
||||
'''
|
||||
|
|
|
@ -16,27 +16,27 @@ class _DummyHooks:
|
|||
'''
|
||||
|
||||
@_hookimpl
|
||||
def quilla_addopts():
|
||||
def quilla_addopts(): # type: ignore
|
||||
pass
|
||||
|
||||
@_hookimpl
|
||||
def quilla_context_obj():
|
||||
def quilla_context_obj(): # type: ignore
|
||||
pass
|
||||
|
||||
@_hookimpl
|
||||
def quilla_configure():
|
||||
def quilla_configure(): # type: ignore
|
||||
pass
|
||||
|
||||
@_hookimpl
|
||||
def quilla_prevalidate():
|
||||
def quilla_prevalidate(): # type: ignore
|
||||
pass
|
||||
|
||||
@_hookimpl
|
||||
def quilla_postvalidate():
|
||||
def quilla_postvalidate(): # type: ignore
|
||||
pass
|
||||
|
||||
@_hookimpl
|
||||
def quilla_step_factory_selector():
|
||||
def quilla_step_factory_selector(): # type: ignore
|
||||
pass
|
||||
|
||||
|
||||
|
@ -87,7 +87,7 @@ def _load_entrypoint_plugins(pm: pluggy.PluginManager):
|
|||
try:
|
||||
entry_point.require()
|
||||
_load_hooks_from_module(pm, entry_point.load())
|
||||
except pkg_resources.DistributionNotFound as e:
|
||||
except pkg_resources.DistributionNotFound:
|
||||
# Skips package if it cannot load it
|
||||
pass
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ class ValidationReport(BaseReport):
|
|||
if 'msg' in params:
|
||||
msg = params['msg']
|
||||
return ValidationReport(
|
||||
type_=params['type'],
|
||||
validation_type=params['type'],
|
||||
target=params['target'],
|
||||
state=params['state'],
|
||||
browser_name=params['targetBrowser'],
|
||||
|
|
|
@ -134,7 +134,7 @@ class BaseStep(DriverHolder, EnumResolver):
|
|||
|
||||
class BaseStepFactory:
|
||||
@abstractclassmethod
|
||||
def from_dict(ctx: Context, step: Dict, driver: Optional[WebDriver] = None) -> BaseStep:
|
||||
def from_dict(cls, ctx: Context, step: Dict, driver: Optional[WebDriver] = None) -> BaseStep:
|
||||
'''
|
||||
Given a context, step dictionary, and optionally a driver, return an appropriate subclass
|
||||
of BaseStep
|
||||
|
@ -149,7 +149,6 @@ class BaseStepFactory:
|
|||
'''
|
||||
|
||||
|
||||
|
||||
class BaseValidation(BaseStep):
|
||||
'''
|
||||
Base validation class with shared functionality for all validations
|
||||
|
@ -171,10 +170,16 @@ class BaseValidation(BaseStep):
|
|||
target: str,
|
||||
state: ValidationStates,
|
||||
selector: Dict[ValidationStates, Callable[[], ValidationReport]],
|
||||
parameters: Dict,
|
||||
parameters: Optional[Dict],
|
||||
driver: Optional[WebDriver] = None,
|
||||
) -> None:
|
||||
super().__init__(ctx, UITestActions.VALIDATE, target=target, parameters=parameters, driver=driver)
|
||||
super().__init__(
|
||||
ctx,
|
||||
UITestActions.VALIDATE,
|
||||
target=target,
|
||||
parameters=parameters,
|
||||
driver=driver
|
||||
)
|
||||
self._type = type_
|
||||
self._state = state
|
||||
self._driver = driver
|
||||
|
@ -187,7 +192,7 @@ class BaseValidation(BaseStep):
|
|||
self.ctx, # type: ignore
|
||||
self._target, # type: ignore
|
||||
self._state, # type: ignore
|
||||
self._parameters,
|
||||
self._parameters, # type: ignore
|
||||
self._driver # type: ignore
|
||||
)
|
||||
|
||||
|
|
|
@ -27,9 +27,9 @@ class OutputValueStep(BaseStep, BaseStepFactory):
|
|||
def from_dict(
|
||||
cls,
|
||||
ctx: Context,
|
||||
action_dict,
|
||||
action_dict: Dict,
|
||||
driver: Optional[WebDriver] = None
|
||||
) -> "OutputValueStep":
|
||||
) -> "BaseStep":
|
||||
'''
|
||||
Factory method to extract needed parameters from a dictionary
|
||||
'''
|
||||
|
@ -44,7 +44,6 @@ class OutputValueStep(BaseStep, BaseStepFactory):
|
|||
|
||||
return OutputValueStep(ctx, **params, driver=driver)
|
||||
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ctx: Context,
|
||||
|
@ -62,7 +61,6 @@ class OutputValueStep(BaseStep, BaseStepFactory):
|
|||
OutputSources.XPATH_PROPERTY: self._output_xpath_property,
|
||||
}
|
||||
|
||||
|
||||
def perform(self):
|
||||
value_producer = self.selector[self.parameters['source']]
|
||||
|
||||
|
|
|
@ -47,12 +47,13 @@ class StepsAggregator(DriverHolder):
|
|||
UITestActions.OUTPUT_VALUE: OutputValueStep,
|
||||
}
|
||||
|
||||
ctx.pm.hook.quilla_step_factory_selector(selector=step_factory_selector) # Allow plugins to add selectors
|
||||
# Allow plugins to add selectors
|
||||
ctx.pm.hook.quilla_step_factory_selector(selector=step_factory_selector)
|
||||
|
||||
for step in steps:
|
||||
step_factory = step_factory_selector.get(step['action'], TestStep)
|
||||
|
||||
self._steps.append(step_factory.from_dict(ctx, step, driver=driver))
|
||||
self._steps.append(step_factory.from_dict(ctx, step, driver=driver)) # type: ignore
|
||||
|
||||
@property
|
||||
def driver(self) -> WebDriver:
|
||||
|
|
|
@ -91,7 +91,6 @@ class XPathValidation(BaseValidation):
|
|||
|
||||
return re.search(pattern, element_text) is not None
|
||||
|
||||
|
||||
def _element_exists(self) -> bool:
|
||||
return len(self._find_all()) > 0
|
||||
|
||||
|
@ -210,7 +209,6 @@ class XPathValidation(BaseValidation):
|
|||
)
|
||||
|
||||
|
||||
|
||||
class URLValidation(BaseValidation):
|
||||
'''
|
||||
Class defining the behaviour for performing URL validations
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
from unittest.mock import Mock
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
from _pytest.config import PytestPluginManager
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.config import Config
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
|
||||
|
@ -28,10 +26,11 @@ def ctx(driver: WebDriver, plugin_manager):
|
|||
@pytest.fixture()
|
||||
def plugin_manager(pytestconfig: Config):
|
||||
|
||||
pm = get_plugin_manager(pytestconfig.rootpath)
|
||||
pm = get_plugin_manager(str(pytestconfig.rootpath))
|
||||
|
||||
return pm
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def driver():
|
||||
mock_driver = Mock(spec=WebDriver)
|
||||
|
|
Загрузка…
Ссылка в новой задаче