new: initial migration from ADO

This commit is contained in:
Natalia Maximo 2021-06-26 11:26:39 -04:00 committed by Natalia Maximo
Parent 15ecb7abcb
Commit b968f2c8b3
77 changed files: 5032 additions and 10 deletions

13
.gitignore vendored

@@ -20,7 +20,6 @@ parts/
 sdist/
 var/
 wheels/
-pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/
 .installed.cfg
@@ -50,6 +49,7 @@ coverage.xml
 *.py,cover
 .hypothesis/
 .pytest_cache/
+cover/
 
 # Translations
 *.mo
@@ -72,6 +72,7 @@ instance/
 docs/_build/
 
 # PyBuilder
+.pybuilder/
 target/
 
 # Jupyter Notebook
@@ -82,7 +83,9 @@ profile_default/
 ipython_config.py
 
 # pyenv
-.python-version
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
 
 # pipenv
 # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@@ -127,3 +130,9 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/

1
MANIFEST.in Normal file

@@ -0,0 +1 @@
graft src

71
Makefile Normal file

@@ -0,0 +1,71 @@
# All of these variables can be set as env variables instead
DOCS_BUILD_DIR ?= _build
DOC_TARGETS ?= html man
PYTHON_EXECUTABLE ?= python3
DIST_DIR ?= dist
CLEAN_CMDS ?= clean-python clean-docs clean-build
SDIST_OPTS ?=
SDIST ?= sdist --dist-dir $(DIST_DIR) $(SDIST_OPTS)
BDIST_OPTS ?=
BDIST_WHEEL ?= bdist_wheel --dist-dir $(DIST_DIR) $(BDIST_OPTS)
PACKAGE_TARGETS ?= $(SDIST) $(BDIST_WHEEL)
PACKAGE_OPTS ?=

.PHONY: help
help: ## Print this help message and exit
	@echo Usage:
	@echo "  make [target]"
	@echo
	@echo Targets:
	@awk -F ':|##' \
		'/^[^\t].+?:.*?##/ {\
			printf "  %-30s %s\n", $$1, $$NF \
		}' $(MAKEFILE_LIST)

.PHONY: package
package: ## Create release packages
	$(PYTHON_EXECUTABLE) setup.py $(PACKAGE_TARGETS) $(PACKAGE_OPTS)

.PHONY: package-deps
package-deps: ## Create wheel files for all runtime dependencies
	$(PYTHON_EXECUTABLE) -m pip wheel --wheel-dir $(DIST_DIR) $(PACKAGE_OPTS) .

.PHONY: docs
docs: ## Build all the docs in the docs/_build directory
	$(MAKE) -C $@ $(DOC_TARGETS) BUILDDIR=$(DOCS_BUILD_DIR)

.PHONY: clean-python
clean-python: ## Cleans all the python cache & egg files
	$(RM) `find . -name "*.pyc" | tac`
	$(RM) -d `find . -name "__pycache__" | tac`
	$(RM) -r `find . -name "*.egg-info" | tac`

.PHONY: clean-docs
clean-docs: ## Clean the docs build directory
	$(RM) -r docs/$(DOCS_BUILD_DIR)

.PHONY: clean-build
clean-build: ## Cleans all code build and distribution directories
	$(RM) -r build $(DIST_DIR)

.PHONY: clean
clean: ## Cleans all build, docs, and cache files
	$(MAKE) $(CLEAN_CMDS)

.PHONY: install
install: ## Installs the package
	$(PYTHON_EXECUTABLE) -m pip install .

.PHONY: install-docs
install-docs: ## Install the package and docs dependencies
	$(PYTHON_EXECUTABLE) -m pip install -e .[docs]

.PHONY: install-tests
install-tests: ## Install the package and test dependencies
	$(PYTHON_EXECUTABLE) -m pip install -e .[tests]

.PHONY: install-all
install-all: ## Install the package, docs, and test dependencies
	$(PYTHON_EXECUTABLE) -m pip install -e .[all]

107
README.md

@@ -1,14 +1,105 @@
-# Project
-> This repo has been populated by an initial template to help get you started. Please
-> make sure to update the content to build a great experience for community-building.
-As the maintainer of this project, please make a few updates:
-- Improving this README.MD file to provide a great experience
-- Updating SUPPORT.MD with content about this project's support experience
-- Understanding the security reporting process in SECURITY.MD
-- Remove this section from the README

# Quilla

<!-- THIS SECTION SHOULD BE COPY+PASTED INTO THE docs/intro.md FILE -->

## Declarative UI Testing with JSON

Quilla is a framework that allows test-writers to perform UI testing using declarative syntax through JSON files. This enables test writers, owners, and maintainers to focus not on how to use code libraries, but on what steps a user would have to take to perform the actions being tested. In turn, this allows for more agile test writing and easier-to-understand test cases.

Quilla was built to be run in CI/CD, in containers, and locally. It also comes with an optional integration with [pytest](https://pytest.org), so you can write your Quilla test cases as part of your regular testing environment for python-based projects. Check out the [quilla-pytest](docs/quilla_pytest.md) docs for more information on how to configure `pytest` to auto-discover Quilla files, adding markers, and more.

Check out the [features](docs/features.md) docs for an overview of all Quilla can do!

## Quickstart

1. Clone the repository
2. `cd` into the `quilla` directory and run `make install`
3. Ensure that you have the correct browser and drivers. Quilla will autodetect drivers that are in your PATH or in the directory from which it is called
4. Write the following as `Validation.json`, replacing `"Edge"` with `"Firefox"` or `"Chrome"` depending on what you have installed:

```json
{
    "targetBrowsers": ["Edge"],
    "path": "https://www.bing.com",
    "steps": [
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "bing"
        }
    ]
}
```

5. Run `quilla -f Validation.json`

## Installation

> Note: It is **highly recommended** that you use a virtual environment whenever you install new python packages.

You can install Quilla by cloning the repository and running `make install`.

For more information on installation options and packaging Quilla for remote install, check out the documentation for it [here](docs/install.md)

## Usage

This module can be used as a library, as a runnable module, or as a command-line tool. The output of `quilla --help` is presented below:

```text
usage: quilla [-h] [-f] [-d] [--driver-dir DRIVERS_PATH] [-P] json

Program to provide a report of UI validations given a json representation of the validations or given the filename
containing a json document describing the validations

positional arguments:
  json                  The json file name or raw json string

optional arguments:
  -h, --help            show this help message and exit
  -f, --file            Whether to treat the argument as raw json or as a file
  -d, --debug           Enable debug mode
  --driver-dir DRIVERS_PATH
                        The directory where browser drivers are stored
  -P, --pretty          Set this flag to have the output be pretty-printed
```

## Writing Validation Files

Check out the documentation for it [here](docs/validation_files.md)

## Context Expressions

This package is able to dynamically inject different values, exposed through context objects and expressions, whenever the validation JSON would ordinarily require a regular string (instead of an enum). This can be used to grab values specified either at the command-line or through environment variables.

More discussion of context expressions and how to use them can be found in the documentation file [here](docs/context_expressions.md)

## Generating Documentation

Documentation can be generated through the `make` command `make docs`.

Check out the documentation for it [here](docs/README.md)

## Make commands

A Makefile is provided with several convenience commands. You can find usage instructions with `make help`, or below:

```text
Usage:
  make [target]

Targets:
  help                           Print this help message and exit
  package                        Create release packages
  package-deps                   Create wheel files for all runtime dependencies
  docs                           Build all the docs in the docs/_build directory
  clean-python                   Cleans all the python cache & egg files
  clean-docs                     Clean the docs build directory
  clean-build                    Cleans all code build and distribution directories
  clean                          Cleans all build, docs, and cache files
  install                        Installs the package
  install-docs                   Install the package and docs dependencies
  install-tests                  Install the package and test dependencies
  install-all                    Install the package, docs, and test dependencies
```

## Contributing

1
VERSION Normal file

@@ -0,0 +1 @@
0.1

20
docs/Makefile Normal file

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

47
docs/README.md Normal file

@@ -0,0 +1,47 @@
# Documentation
## Style
The Quilla module is extensively documented using the Google Docstring style (see [example](https://www.sphinx-doc.org/en/master/usage/extensions/example_google.html) here), and uses [Sphinx](https://www.sphinx-doc.org/en/master/index.html) to generate the documentation.
## Dependencies
The following Python packages are used to create all the documentation for this project
| Package | Usage |
|---------------------------|:-----------------------------------------------------------------------:|
|`Sphinx` | Generating docs |
|`sphinx-rtd-theme` | HTML documentation style theme |
|`sphinx_autodoc_typehints` | Detecting type hints |
|`myst_parser` | Integrating markdown docs used in the repo into generated documentation |
|`sphinx_argparse_cli` | Documenting the CLI usage |
## Building the Docs
### Building from the package directory
The preferred method for building documentation is to use the `make` commands provided in the root package directory.
Using environment variables or the command-line, you can further customize these options. The following table describes the variables you can use to customize the documentation process.
| Variable Name | Use | Default Values |
|:-------------:|:---:|:--------:|
| `DOC_TARGETS` | A space-separated list of values specifying what targets to build | `html man`|
| `DOCS_BUILD_DIR` | A directory (relative to the `docs` directory) in which to build the `make` targets | `_build` |
For more information on customizing `make` targets, check out the [makefile vars](makefile_vars.md) documentation
### Building from the `docs/` directory
All of the above packages are available through `pip` and can be installed with `pip install sphinx sphinx-rtd-theme sphinx_autodoc_typehints myst_parser sphinx_argparse_cli`. They are also specified in the `setup.py` file, and can therefore be installed with `pip install .[docs]`.
To generate the docs, run `make help` to see what targets are available. In general, these are common targets:
- `make html`
- `make man`
- `make latex`
- `make latexpdf`
> Note: Even though the `latexpdf` target will produce a PDF document, you need the required `tex` packages installed to generate it, and those are not provided with Sphinx. Installing `apt` packages such as `tex-common`, `texlive-full`, etc. may help, but installation of the specific packages is out of scope for this documentation and should be handled by the end user.

64
docs/conf.py Normal file

@@ -0,0 +1,64 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Quilla'
copyright = '2021, Microsoft'
author = 'Natalia Maximo'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
    'myst_parser',
    'sphinx_argparse_cli',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
napoleon_attr_annotations = True
latex_elements = {
    'extraclassoptions': 'openany',
}

139
docs/context_expressions.md Normal file

@@ -0,0 +1,139 @@
# Context Expressions
## Overview
Quilla enables test writers to dynamically create their tests using context expressions and context objects. This allows test writers to write validations that might require sensitive data that should not be committed to files (such as passwords for logging in), as well as reacting appropriately to potentially dynamic components of their applications. Beyond that, context expressions can be extended through plugins to incorporate more advanced behaviours such as retrieving secrets from a keystore.
Context expressions can be used whenever Quilla expects a non-enumerated value. This means that, while you can control the target of your `Click` action through a context expression, you cannot control the action itself through a context expression. This is because Quilla must be able to ensure at "compile time" (i.e. when the file is first loaded in to Quilla) that all the actions being performed are supported actions.
The syntax for context expressions is `${{ <CONTEXT_OBJECT_NAME>.<PATH_TO_VALUE> }}`. For the `Validation` and `Definitions` context objects, this is represented as a dot-separated path of a dictionary (i.e. to get the value `"ham"` from `{"spam": {"eggs": "ham"}}`, it would be accessed by `${{ Validation.spam.eggs }}`).
At this time, Quilla does not run `eval` on the context expression. This means that, while context expressions allow you to replace the values, you cannot write pure python code inside the context expression.
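For example, the following step (a sketch; `BASE_URL` is a hypothetical environment variable) substitutes the variable's value verbatim, whereas an expression such as `${{ Environment.BASE_URL + '/login' }}` would not be evaluated, since no Python code runs inside the braces:
```json
{
    "action": "NavigateTo",
    "target": "${{ Environment.BASE_URL }}"
}
```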
Below is a table that describes the included context objects.
| Context Object Name | Description | Example |
|:-------------------:|:-----------:|:-------:|
| Definitions | Resolves a name from the passed-in definition files or local definitions | `${{ Definitions.MyService.HomePage.SubmitButton }}` |
| Environment | Accesses environment variables, where `<PATH_TO_VALUE>` matches exactly the name of an environment variable | `${{ Environment.PATH }}` |
| Validation | Accesses previous outputs created by the validation | `${{ Validation.my.output }}` |
## Context Objects
### Definitions
The `Definitions` context object is a way to use an external file to hold any text data that is better stored with additional context. This is particularly useful for maintaining a definitions file for all XPaths used. Since most XPaths contain paths, IDs, and other element identifying information, they are not normally easy to understand at a glance. By using definitions files, each XPath used in a validation file can have a proper name associated with it, making validation files easier to read.
As an additional benefit, definition files allow test writers to quickly adapt to changes in how elements are identified on a page. If, for example, a developer changes the ID of a 'Submit' button, test writers can ensure that all tests that require the use of that submit button are using the new, correct identifier by changing it in just one place.
Below is an example definitions file:
```json
// Definitions.json
{
    "HomePage": {
        "SearchTextField": "//input[@id='search']",
        "SearchSubmitButton": "//button[@id='form_submit']"
    }
}
```
We can then use these directly inside of our validation
```json
// HomePageSearchTest.json
{
    "targetBrowsers": ["Edge"],
    "path": "https://example.com",
    "steps": [
        {
            "action": "SendKeys",
            "target": "${{ Definitions.HomePage.SearchTextField }}",
            "parameters": {
                "data": "puppies"
            }
        },
        {
            "action": "Click",
            "target": "${{ Definitions.HomePage.SearchSubmitButton }}"
        },
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "puppies"
        }
    ]
}
```
When calling quilla, we pass in the definitions file: `quilla -d Definitions.json -f HomePageSearchTest.json`.
If we wanted there to be better legibility but not use the definitions anywhere else, we could also specify them inside the quilla test file itself. We see an example of that below:
```json
// HomePageSearchTest.json
{
    "definitions": {
        "HomePage": {
            "SearchTextField": "//input[@id='search']",
            "SearchSubmitButton": "//button[@id='form_submit']"
        }
    },
    "targetBrowsers": ["Edge"],
    "path": "https://example.com",
    "steps": [
        {
            "action": "SendKeys",
            "target": "${{ Definitions.HomePage.SearchTextField }}",
            "parameters": {
                "data": "puppies"
            }
        },
        {
            "action": "Click",
            "target": "${{ Definitions.HomePage.SearchSubmitButton }}"
        },
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "puppies"
        }
    ]
}
```
Quilla will accept multiple definition files by adding multiple `-d <FILENAME>` groups to the CLI. It will attempt to resolve any duplicates by performing a deep merge operation between the multiple definition files, as well as any definitions specified in the quilla test file. The load order for the definition files is provided below:
1. First definition file specified to the CLI
1. Second definition file specified to the CLI
1. (...)
1. The last definition file specified to the CLI
1. Local definitions in the Quilla test file
If there are conflicts encountered, Quilla will favour the newer configs (i.e. second definition file overrides first, etc).
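As a sketch of how this resolution works, suppose two hypothetical definition files are passed as `quilla -d base.json -d team.json -f HomePageSearchTest.json`:
```json
// base.json
{
    "HomePage": {
        "SearchTextField": "//input[@id='search']",
        "SearchSubmitButton": "//button[@id='form_submit']"
    }
}

// team.json
{
    "HomePage": {
        "SearchSubmitButton": "//button[@id='new_submit']"
    }
}

// Effective definitions after the deep merge: the later file wins the
// conflicting "SearchSubmitButton" key, while unrelated keys survive
{
    "HomePage": {
        "SearchTextField": "//input[@id='search']",
        "SearchSubmitButton": "//button[@id='new_submit']"
    }
}
```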
Quilla definitions can also be defined recursively. Quilla will attempt to evaluate the context expression iteratively until it has exhausted the context expressions in the string, so definitions do not have a limit to the recursive depth. This is useful when a definition is in part determined by other definitions, and reduces the amount of repetition in the Quilla files.
Below is an example definition file that uses recursive definitions:
```json
// Definitions.json
{
    "HomePage": {
        "HeaderSection": "//div[@id='header']",
        "SignInButton": "${{ Definitions.HomePage.HeaderSection }}/a[@id='login']"
    }
}
```
When calling `${{ Definitions.HomePage.SignInButton }}`, Quilla will expand it to `"//div[@id='header']/a[@id='login']"`.
### Environment
The `Environment` context object works as a pass-through to `os.environ.get(<PATH_TO_VALUE>, '')`. This means that Quilla will fail silently if the variable that you attempt to extract from the environment does not exist. Most likely, this will result in other failures of the UI validation. If you use this context object and are getting unexpected errors, check to make sure that the variable is set and that you have not made a typo in the variable name!
### Validation
The `Validation` context object will attempt to access a data store represented by a dictionary. This data store is populated at runtime through the `OutputValue` action, enabling any value that was created through the validations to be used inline.
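As a minimal sketch (the XPath and output name here are placeholders), a step can store a value with the `OutputValue` action and a later step can read it back through the `Validation` context object:
```json
{
    "action": "OutputValue",
    "target": "puppies",
    "parameters": {
        "source": "Literal",
        "outputName": "searchTerm"
    }
},
{
    "action": "SendKeys",
    "target": "//input[@id='search']",
    "parameters": {
        "data": "${{ Validation.searchTerm }}"
    }
}
```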

33
docs/features.md Normal file

@@ -0,0 +1,33 @@
# Features
## Declarative JSON UI Testing
Write all your UI test cases in JSON using Quilla's declarative syntax. Focus on *what* your tests do, not *how* you do them!
Check out how to write your validation files [here](validation_files.md).
## Dynamic Validation Files with Context Expressions
Most UI validations have well-defined and easy-to-reproduce steps that can be defined statically. For all those that need a little extra, context expressions can give you just the edge you need. You'll never have to worry about writing passwords in your validation files with the bundled `Environment` context object, which lets you use environment variables in your UI validations.
Need to use values you produce yourself further on in your testing? With the `Validation` context object, you can consume your outputs seamlessly!
Check out all these and more in the context expressions documentation [here](context_expressions.md)
## Extensive Plugin System
Quilla has an extensive plugin system that allows users to customize their own experience and further extend its functionality! Check out how to write your first `uiconf.py` local plugin file and how to release new plugins [here](plugins.md), and make sure to check out the documentation for `quilla.hookspecs` for the most up-to-date hooks and how to use them!
## JSON Reporting and Outputs
Interact with Quilla output programmatically, and act on the results of your Quilla tests easily! All Quilla results are returned as a JSON string which you can pass to other applications, display neatly with the `--pretty` flag, or publish as part of your CI/CD.
Did you produce values you will need later on in your build pipeline? Collect them from the resulting JSON through the `"Outputs"` object!
## Pytest Integration
Quilla also functions as a `pytest` plugin! Just add `use-quilla: True` to your Pytest configuration, and you can immediately add all your Quilla tests to your pre-existing testing suite!
Using the `pytest-quilla` plugin (which is installed alongside `quilla`) lets you take advantage of an already-existing and thriving community of testers! Add concurrency to your quilla tests through `pytest-xdist` with the assurance that each of your Quilla tests will be executed with an isolated context, enhance your testing output through one of many visual plugins, and benefit from the familiar `pytest` test discovery and reporting paradigm.
Check out the `pytest-quilla` documentation [here](quilla_pytest.md)!

7
docs/hooks.rst Normal file

@@ -0,0 +1,7 @@
Quilla Plugin Hooks
-----------------------

.. automodule:: quilla.hookspecs
   :members:
   :undoc-members:
   :show-inheritance:

53
docs/how_it_works.md Normal file

@@ -0,0 +1,53 @@
# How does Quilla work?
Quilla is a wrapper on Selenium to allow for test writers to create their testing scenarios focusing not on how Selenium works, but on how their tests are executed. As such, the goal was to create a testing syntax that focuses on legibility, and uses minimal required setup. For further information on the specifics of how Quilla translates the JSON test files into workable code and runs the validations, read on.
## Code execution flow
When Quilla is called, it will first create and initialize the plugin manager. This is done by first loading all the plugins that are exposed through python entrypoints, then attempting to discover a `uiconf.py` file in the plugin root directory (which is at this time just the calling directory).
Next, Quilla will create the parser and pass it to the `quilla_addopts` hook to allow plugins to register new parser options. If the user has specified that they are passing in a filename, the file will then be read and the contents of the file will be saved as a string. It will then parse the CLI options and use them to create the default context.
The context object is initialized as follows:
1. A snapshot of the `PATH` variable is taken
1. The debug configurations are set
1. The driver path is added to the system `PATH` environment variable
1. The definition files will be loaded and merged
Once the context is initialized, it will be passed to the `quilla_configure` hook to allow plugins to alter the context.
When the configuration is finalized, the contents of the file will be loaded with the default JSON loader from python into a dictionary. This dictionary will then be processed as follows:
1. If the quilla test file has a 'definitions' key, it will be loaded and merged with the existing definitions
1. All specified browser names will be resolved into a `BrowserTargets` enum.
1. Each step will be processed as such:
1. The action name for the step will be resolved into a `UITestActions` enum
1. If the action is a `Validate` action, the type will be resolved into a `ValidationTypes` enum
1. Based on the `ValidationTypes`, the appropriate `ValidationStates` subclass will be selected and the state will be resolved
1. If there are parameters specified, they will be checked:
1. If the "source" parameter is specified, it will be resolved into a `OutputSources` enum
1. The `UIValidation` object is then created with the fully-resolved dictionary
1. A `StepsAggregator` instance is created to manage the creation of all proper step objects
1. Each step in the list of step dictionaries will be resolved into the appropriate type, either a `TestStep` or something that can be resolved by the `Validation` factory class
1. For each browser specified in the JSON file, a copy of the `StepsAggregator` object will be created and passed into a new `BrowserValidation` object
After the `UIValidation` object is created, it is passed to the `quilla_prevalidate` plugin hook. This hook is able to mutate the object however it sees fit, allowing end-users to manipulate steps dynamically.
When the `UIValidation` object is finalized, it will then call `validate_all()` to execute all browser validations sequentially. The order in which the browsers will be validated is the same order in which they were specified. When calling the `validate_all` function, the following will occur for each browser target:
1. An appropriate driver will be created and configured according to the runtime context, opening up a blank page
1. The driver will navigate to the root path of the validation
1. The `BrowserValidation` object will bind the current driver to itself, to each of its steps (through the `StepsAggregator`), and to the runtime context
1. Each step will be executed in order, performing the following:
1. The action function is selected. For a `TestStep`, it is resolved based on the action associated to its `UITestActions` value through a dictionary selector. For a `Validation`, this is determined based on the `ValidationStates` subclass value (i.e. the `XPathValidationStates`, etc)
1. If the action produces a report, add it to the list of resulting reports
1. If the action produces an exception, a `StepFailureReport` will be generated and the rest of the steps will *not* be executed. This is substantially different from the `Validation` behaviour: the `Validate` action only describes the state of the page, so it does not necessarily mean that the steps following it are not able to be performed. Since almost every other action actually causes the state of the page to change, allowing the test to continue would mean allowing the execution of the next steps in an inconsistent state.
1. Once all steps have been executed, or an uncaught exception happens on the `StepsAggregator` (which will happen if the `suppress_exceptions` flag is set to `False` in the context object), the `BrowserValidation` will close the browser window and unbind the driver from itself, the steps, and the context.
1. If no exception was raised, the list of report objects will be returned
After each browser finishes executing, the returned reports are all aggregated and put into a `ReportSummary`, which is ultimately returned.
Once the final `ReportSummary` has been generated, it is passed (along with the runtime context) to the `quilla_postvalidate` hook.
Finally, the entire `ReportSummary` is converted into JSON alongside any outputs created by the test actions, which are then printed to the standard output. If the `ReportSummary` contains any failures, or critical failures, it will then return the exit code of 1, otherwise it will return an exit code of 0.
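For reference, the final JSON printed to standard output has roughly the shape sketched below (this mirrors the sample output in the [plugins](plugins.md) docs; the counts are illustrative):
```json
{
    "Outputs": {},
    "reportSummary": {
        "critical_failures": 0,
        "failures": 0,
        "reports": [],
        "successes": 1,
        "total_reports": 1
    }
}
```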

25
docs/index.rst Normal file

@@ -0,0 +1,25 @@
.. Quilla documentation master file, created by
   sphinx-quickstart on Tue May 25 14:32:45 2021.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to Quilla's documentation!
========================================

.. toctree::
   :maxdepth: 3
   :caption: Contents:

   intro
   preface
   usage
   install
   validation_files
   context_expressions
   plugins
   hooks
   quilla_pytest
   how_it_works
   source/modules
   README
   makefile_vars

23
docs/install.md Normal file

@@ -0,0 +1,23 @@
# Installation
## Recommended Python Version
Quilla was designed to work with Python 3.8+, and currently includes syntax that will cause errors in Python 3.7. Make sure you are using the correct version of Python.
## Installing Quilla
<!-- TODO: Update docs if it gets deployed to PyPI to reflect easy installation with `pip` -->
> Note: A virtual environment is recommended. Python ships with `venv`, though other alternatives such as using `conda` or `virtualenvwrapper` are just as useful
Download (or clone) the repository, and run `make install` to install Quilla. This will also expose the Quilla plugin to Pytest (if installed), though it will remain disabled until your pytest configuration file has the `use-quilla` option set to `True`. This does not cause any issues if Pytest is not installed, however, since it only registers the entrypoint.
## Packaging the Python Code
Packaging Quilla requires the `setuptools`, `pip`, and `wheel` packages. These should come default with every installation of Python, but if something seems to be going wrong make sure to check if they are installed.
The preferred method of packaging Quilla is by using the `make` commands that come bundled with the project. Use `make package` to create (by default) the source and wheel distributions of Quilla, and use `make package-deps` to create a Quilla `.whl` file and all the `.whl` files required by Quilla as dependencies.
## Customizing install & build
The `Makefile` provided with Quilla uses several environment variables to configure its install and build process. All the available options are defined in the [makefile vars](makefile_vars.md) docs, including explanations on how they are used.

19
docs/intro.md Normal file

@@ -0,0 +1,19 @@
<!--
THIS FILE CONTAINS THE INTRO CONTENTS
FROM THE `quilla/README.md` FILE SO IT WILL
BE DISPLAYED IN THE GENERATED DOCUMENTATION.
DO NOT EDIT THIS DIRECTLY, AND IF YOU EDIT
THE README FILE'S INTRO, MAKE SURE YOU COPY+PASTE
IT TO THIS FILE.
MAKE SURE ANY LOCAL LINKS IN THIS PAGE USE THE CORRECT
LOCATION
-->
# Quilla
## Declarative UI Testing with JSON
Quilla is a framework that allows test-writers to perform UI testing using declarative syntax through JSON files. This enables test writers, owners, and maintainers to focus not on how to use code libraries, but on what steps a user would have to take to perform the actions being tested. In turn, this allows for more agile test writing and easier-to-understand test cases.
Quilla was built to be run in CI/CD, in containers, and locally. It also comes with an optional integration with [pytest](https://pytest.org), so you can write your Quilla test cases as part of your regular testing environment for python-based projects. Check out the [quilla-pytest](quilla_pytest.md) docs for more information on how to configure `pytest` to auto-discover Quilla files, adding markers, and more.

35
docs/make.bat Normal file

@@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd

50
docs/makefile_vars.md Normal file

@@ -0,0 +1,50 @@
# Makefile Variables
The following are all the makefile variables used
| Variable Name | Use | Defaults |
|---------------|-----|----------|
| `PYTHON_EXECUTABLE` | The python version to use for any targets that use python commands. If you are using virtual environments, this will auto-detect it, and should only be set if you have a specific need to change the executable used for the make commands | `python3` |
| `CLEAN_CMDS` | The `make` targets to be executed when running `make clean` | `clean-python clean-docs clean-build` |
| `DOC_TARGETS` | The `make` targets to build docs for. For more info on possible targets, run `make help` in the `quilla/docs` directory | `html man` |
| `DOCS_BUILD_DIR` | The directory to output the docs artifacts in. This will output in `quilla/docs/$(DOCS_BUILD_DIR)` | `_build` |
| `DIST_DIR` | The directory in which distribution artifacts (`.whl` files, source distributions, etc) will be output to | `dist` |
| `SDIST` | The configurations for the source distribution target | `sdist --dist-dir $(DIST_DIR) $(SDIST_OPTS)` |
| `SDIST_OPTS` | Additional options for `SDIST` | |
| `BDIST_WHEEL` | The configurations for the wheel binary distribution target | `bdist_wheel --dist-dir $(DIST_DIR) $(BDIST_OPTS)` |
| `BDIST_OPTS` | Additional options for `BDIST_WHEEL` | |
| `PACKAGE_TARGETS` | The distribution targets for the `make package` command | `$(SDIST) $(BDIST_WHEEL)` |
| `PACKAGE_OPTS` | Additional options for the `make package` and `make package-deps` targets | |
## Examples
### Changing the distribution directory
```bash
# Using options
$ make package DIST_DIR="_dist"
```
```bash
# Using environment variables
$ DIST_DIR="_dist" make package
```
### Only build wheel packages
```bash
# Using environment variables
$ SDIST="" make package
```
```bash
# Using options
$ make package SDIST=""
```
### Build html, man, latexpdf, and epub docs targets
```bash
# Using environment variables
$ DOC_TARGETS="html man latexpdf epub" make docs
```

218
docs/plugins.md Normal file

@@ -0,0 +1,218 @@
# Plugins
The Quilla framework supports the use of both installed and local plugins. This is done to allow for maximum extensibility while still keeping access controlled.
## Why Plugins
First and foremost, plugins allow the community to extend the behaviours of Quilla, and give individual users more granular configuration control over the module.
Secondly, plugins allow for individual users of this project to decouple platform- and use-specific logic
from the codebase of the entire project. For example, a user could use a specific secret store that
they would like to retrieve data from and inject into their validations dynamically. Instead of having
to find a way of doing so externally, they could write a plugin to add a 'Secrets' context object, connecting
their own code to the right secret store without having to expose it.
More examples of what can be done with plugins are found in the sections below.
## Plugin discovery
Quilla discovers the plugins by searching the installed modules for 'QuillaPlugins' entrypoints,
and by searching a local file (specifically, the `uiconf.py` file in the calling directory). The discovery
process is done by searching for the predefined hook functions (as found in the `hookspec` module). If
your module, or your `uiconf.py` file, provide a function with a hook name it will be automatically loaded
and used at the appropriate times. See the `hookspec` documentation to see exactly which plugins are currently
supported.
## Local Plugin Example - Configuration
Consider the example of a programmer who does not want to enable all of the debugging configurations, but also does not want the browser to run in headless mode. Without plugins, they would have to download the repository, make changes to the code, install it, and then run.
This is far too cumbersome for such a small change, but with plugins we can do it in just two lines!
1. In the directory that includes your validations, add a `uiconf.py` file
2. Add the following lines to `uiconf.py`:
```python
def quilla_configure(ctx):
    ctx.run_headless = False
```
And you are all done! No further steps are required, you can just run the `quilla` CLI from that directory
and the configurations will be seamlessly picked up.
## Local Plugin Example - Adding CLI Arguments
Using the plugin system one can also seamlessly add (and act on) different CLI arguments which can be used later
on by your plugin, or maybe even by someone else's plugin!
As an example, consider the programmer from above who wants to run the browser outside of headless mode. Perhaps they wish to keep the default behaviour of running without headless mode, but they don't want to change code whenever they swap between modes. They would need to add a new CLI argument and consume it for their configuration!
1. In the validations directory, add a `uiconf.py` file
2. Add the following lines to `uiconf.py`:
```python
def quilla_addopts(parser):
    parser.add_argument(
        '-H',
        '--headless',
        action='store_true',
        help='Run the browsers in headless mode'
    )


def quilla_configure(ctx, args):
    ctx.run_headless = args.headless
```
Now, whenever they run `quilla`, the browsers will run outside of headless mode; if they run `quilla --headless`, the browsers will run in headless mode; and if they run `quilla --help`, they should see their CLI argument:
```text
usage: quilla [-h] [-f] [-d] [--driver-dir DRIVERS_PATH] [-P] [-H] json

Program to provide a report of UI validations given a json representation of the validations or given the filename
containing a json document describing the validations

positional arguments:
  json                  The json file name or raw json string

optional arguments:
  -h, --help            show this help message and exit
  -f, --file            Whether to treat the argument as raw json or as a file
  -d, --debug           Enable debug mode
  --driver-dir DRIVERS_PATH
                        The directory where browser drivers are stored
  -P, --pretty          Set this flag to have the output be pretty-printed
  -H, --headless        Run the browsers in headless mode
```
## Installed Plugin Example - Packaging
As a final example with our previous programmer, suppose they now want to publish this new package
to be used by others. They set up their package as follows:
```text
quilla-headless
+-- headless
| +-- __init__.py
| +-- cli_configs.py
+-- setup.py
```
```python
# Inside headless/cli_configs.py
def quilla_addopts(parser):
    parser.add_argument(
        '-H',
        '--headless',
        action='store_true',
        help='Run the browsers in headless mode'
    )


def quilla_configure(ctx, args):
    ctx.run_headless = args.headless
```
```python
# Inside setup.py
from setuptools import setup, find_packages

setup(
    name='quilla-headless',
    version='0.1',
    packages=find_packages(),
    entry_points={
        'QuillaPlugins': ['quilla-headless = headless.cli_configs']
    }
)
```
Finally, they run `pip install .` in the `quilla-headless` directory. Their new CLI option and plugin will now be globally available to the programmer, no matter where they call Quilla from. Anyone who installs their package will also be able to use this new CLI option!
## Local Plugin Example - Context expressions
The Quilla framework includes the ability to use the `Validation` and `Environment` context objects
(discussed in the [context expression documentation](context_expressions.md)) out-of-the-box, which allows
validations to produce (and use) outputs, and use environment variables. However, it is also possible to add new context
objects through the use of plugins. In this example, we'll be creating a local plugin that will add the `Driver` context
object, which will give us some basic information on the state of the current driver.
1. Create a `uiconf.py` file in the directory
2. Add the following to the `uiconf.py` file:
```python
def quilla_context_obj(ctx, root, path):
    # We only handle 'Driver' context objects, so return None
    # to allow other plugins to handle the object
    if root != 'Driver':
        return

    # We only support 'name' and 'title', so any path of length
    # longer than one cannot resolve with this plugin, but another
    # plugin could support it so we'll still return None here
    if len(path) > 1:
        return

    # Now we handle the actual options that we support
    opt = path[0]
    if opt == 'name':
        return ctx.driver.name
    if opt == 'title':
        return ctx.driver.title
```
3. Create the `Validation.json` file in the same directory
4. Add the following to the `Validation.json` file:
```json
{
    "targetBrowsers": ["Firefox"],
    "path": "https://bing.ca",
    "steps": [
        {
            "action": "OutputValue",
            "target": "${{ Driver.name }}",
            "parameters": {
                "source": "Literal",
                "outputName": "browserName"
            }
        },
        {
            "action": "OutputValue",
            "target": "${{ Driver.title }}",
            "parameters": {
                "source": "Literal",
                "outputName": "siteTitle"
            }
        }
    ]
}
```
5. Run `quilla -f Validation.json --pretty`. You should receive the following output:
```json
{
    "Outputs": {
        "browserName": "firefox",
        "siteTitle": "Bing"
    },
    "reportSummary": {
        "critical_failures": 0,
        "failures": 0,
        "reports": [],
        "successes": 0,
        "total_reports": 0
    }
}
```

13
docs/preface.md Normal file

@@ -0,0 +1,13 @@
# Preface
This document has been written to be used by a multitude of individuals who interact with Quilla: Test writers, web developers, plugin creators, and code maintainers. As such, not every part of this document is important to every user.
For all who are unfamiliar with the concept of XPaths, make sure to brush up on what they are. The W3Schools website has a good section on them [here](https://www.w3schools.com/XML/xml_xpath.asp)
If you are a test writer looking for basic information on how to write a test, start by reading the section on [writing validation files](validation_files.md). This should give you the basics of how to write Quilla tests, including a couple of examples. Then, should you want to extend your tests using non-static information, such as getting data from the environment variables or from a definition file, read the section on [context expressions and context objects](context_expressions.md). Should you decide you need more extensible configuration or you need to get data from custom sources for your validations (i.e. accessing a secret store), check out the section on [writing plugins](plugins.md). Finally, if you are seeking to integrate Quilla tests with Pytest, read the section on [the pytest-quilla integration](quilla_pytest.md), which will cover how to enable running Quilla as part of pytest.
If you are a web developer looking to maintain a definition file (or multiple definition files) for a QA team to write tests with, check out the section on [context expressions and context objects](context_expressions.md), specifically the subsection on the `Definitions` context object.
If you are a plugin creator, make sure you're familiar with [context expressions and context objects](context_expressions.md) first, since they are a key aspect of exposing new functionality for test writers. Then, check out the section on [how to write plugins](plugins.md) to get an understanding of how plugins work in Quilla and how to publish one so that Quilla will auto-detect it. Finally, use the [hooks](hooks.rst) reference to see what plugin hooks are available to you, when they are called, and what data is exposed to you through them.
If you are a code maintainer or are looking to make a contribution to the code, first make sure you have a good understanding of [how quilla works](how_it_works.md), how to [write validation files](validation_files.md), and how to [use context objects](context_expressions.md) so that you understand the general structure of the data that Quilla processes. Then, read up on how [plugins work](plugins.md) and which [plugin hooks are exposed](hooks.rst). For information regarding the packaging & installation process, read the section in the [installation instructions](install.md) on how to package. For information on the documentation style, dependencies, and how to build the docs, check out the [documentation section](README.md). Finally, for information on how to customize the make commands using various environment variables, read the [makefile variable documentation](makefile_vars.md).

3
docs/quilla_pytest.md Normal file

@@ -0,0 +1,3 @@
# Running Quilla tests with Pytest
(TODO)

8
docs/source/modules.rst Normal file

@@ -0,0 +1,8 @@
API Reference
======================

.. toctree::
   :maxdepth: 4

   quilla
   pytest_quilla

18
docs/source/pytest_quilla.rst Normal file

@@ -0,0 +1,18 @@
pytest\_quilla package
======================

.. automodule:: pytest_quilla
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

pytest\_quilla.pytest\_classes module
-------------------------------------

.. automodule:: pytest_quilla.pytest_classes
   :members:
   :undoc-members:
   :show-inheritance:

26
docs/source/quilla.browser.rst Normal file

@@ -0,0 +1,26 @@
quilla.browser package
======================

.. automodule:: quilla.browser
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

quilla.browser.browser\_validations module
------------------------------------------

.. automodule:: quilla.browser.browser_validations
   :members:
   :undoc-members:
   :show-inheritance:

quilla.browser.drivers module
-----------------------------

.. automodule:: quilla.browser.drivers
   :members:
   :undoc-members:
   :show-inheritance:

34
docs/source/quilla.common.rst Normal file

@@ -0,0 +1,34 @@
quilla.common package
=====================

.. automodule:: quilla.common
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

quilla.common.enums module
--------------------------

.. automodule:: quilla.common.enums
   :members:
   :undoc-members:
   :show-inheritance:

quilla.common.exceptions module
-------------------------------

.. automodule:: quilla.common.exceptions
   :members:
   :undoc-members:
   :show-inheritance:

quilla.common.utils module
--------------------------

.. automodule:: quilla.common.utils
   :members:
   :undoc-members:
   :show-inheritance:

42
docs/source/quilla.reports.rst Normal file

@@ -0,0 +1,42 @@
quilla.reports package
======================

.. automodule:: quilla.reports
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

quilla.reports.base\_report module
----------------------------------

.. automodule:: quilla.reports.base_report
   :members:
   :undoc-members:
   :show-inheritance:

quilla.reports.report\_summary module
-------------------------------------

.. automodule:: quilla.reports.report_summary
   :members:
   :undoc-members:
   :show-inheritance:

quilla.reports.step\_failure\_report module
-------------------------------------------

.. automodule:: quilla.reports.step_failure_report
   :members:
   :undoc-members:
   :show-inheritance:

quilla.reports.validation\_report module
----------------------------------------

.. automodule:: quilla.reports.validation_report
   :members:
   :undoc-members:
   :show-inheritance:

53
docs/source/quilla.rst Normal file

@@ -0,0 +1,53 @@
quilla package
==============

.. automodule:: quilla
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

quilla.ctx module
-----------------

.. automodule:: quilla.ctx
   :members:
   :undoc-members:
   :show-inheritance:

quilla.ui\_validation module
----------------------------

.. automodule:: quilla.ui_validation
   :members:
   :undoc-members:
   :show-inheritance:

quilla.plugins module
---------------------

.. automodule:: quilla.plugins
   :members:
   :undoc-members:
   :show-inheritance:

quilla.hookspecs module
-----------------------

.. automodule:: quilla.hookspecs
   :members:
   :undoc-members:
   :show-inheritance:

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   quilla.browser
   quilla.common
   quilla.reports
   quilla.steps

34
docs/source/quilla.steps.rst Normal file

@@ -0,0 +1,34 @@
quilla.steps package
====================

.. automodule:: quilla.steps
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

quilla.steps.base\_steps module
-------------------------------

.. automodule:: quilla.steps.base_steps
   :members:
   :undoc-members:
   :show-inheritance:

quilla.steps.steps module
-------------------------

.. automodule:: quilla.steps.steps
   :members:
   :undoc-members:
   :show-inheritance:

quilla.steps.validations module
-------------------------------

.. automodule:: quilla.steps.validations
   :members:
   :undoc-members:
   :show-inheritance:

7
docs/usage.rst Normal file

@@ -0,0 +1,7 @@
Command-Line Usage
=====================

.. sphinx_argparse_cli::
   :module: quilla
   :func: make_parser

276
docs/validation_files.md Normal file

@@ -0,0 +1,276 @@
# Validation Files
Quilla operates through validation files, which are `JSON` files that define the environment and process for performing a validation in a declarative way. Each validation file requires the following:
- One or more target browsers (Firefox, Chrome, Edge)
- A starting URL path
- A list of steps
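Putting those three requirements together, a minimal validation file looks like the following sketch (the browser, URL, and single step are placeholders):
```json
{
    "targetBrowsers": ["Firefox"],
    "path": "https://example.com",
    "steps": [
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "example"
        }
    ]
}
```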
Each step can be either a setup step, or a validation step. Setup steps do not ordinarily provide any reports if they are successful, and Quilla will abort the test if any steps cause an error, as it will assume that the error is not recoverable. In this case, a `StepFailureReport` will be produced, and the `ReportSummary` will indicate that there is a critical failure.
Validation steps will produce a `ValidationReport`, which can result in either a success or a failure. Although the hope is that every test you write will result in a success, Quilla will not abort if a validation fails, and will continue the test until it is finished or otherwise aborted. However, if in attempting to perform the validation some unrecoverable error occurs, Quilla will instead produce a `StepFailureReport` with the exception that occurred and abort the execution.
All Quilla integration tests are written as Quilla tests and therefore can be referenced as examples when writing new Quilla tests.
## Supported actions
The table below summarizes all the supported actions, what they do, and what they require. The `Validate` and `OutputValue` actions are omitted from this table and are shown in later sections.
| Action | Description | Target | Parameters |
|--------|-------------|--------|------------|
| `Refresh` | Refreshes the current page | None | None |
| `NavigateBack` | Navigates back to the last page | None | None |
| `NavigateForward` | Navigates forward to the next page | None | None |
| `NavigateTo` | Navigates to the target URL | `URL` | None |
| `Click` | Clicks an element on the page | `XPath` | None |
| `Clear` | Clears the text from an element on the page | `XPath` | None |
| `Hover` | Hovers the cursor over the target element on the page | `XPath` | None |
| `SendKeys` | Types the specified keys onto the target element on the page | `XPath` | `data` |
| `WaitForExistence` | Waits until a target element exists on the page | `XPath` | `timeoutInSeconds` |
| `WaitForVisibility` | Waits until a target element is visible on the page | `XPath` | `timeoutInSeconds` |
| `SetBrowserSize` | Sets the browser size to a specific width & height | None | `width`, `height` |
## Output Values
Quilla is able to create 'outputs', which allows users to use values from the page within their own validations. This can come in handy when you need to react to the data on the page. A classic example is an application that displays some text that it requires the user to then type into a text field: instead of hardcoding the value for the UI tests (which may be impossible if the values are dynamically generated), a Quilla test could instead read the value from the XPath and use it in the same validation.
Below is a table displaying the supported output sources, the targets they support, and the parameters they require. Every single output requires an `outputName` parameter.
| Source | Description | Target type | Parameters |
|--------|-------------|-------------|------------|
| `Literal` | Creates an output from the literal value of the target | `string` | None |
| `XPathText` | Creates an output from the inner text value of the target | `XPath` | None |
| `XPathProperty` | Creates an output from the value of the specified property name of the target | `XPath` | `parameterName` |
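To illustrate the read-and-retype scenario described above, the following step fragment (a sketch; both XPaths are placeholders) captures the text of an on-page element with `XPathText` and types it back into a field by reading the `Validation` context object:
```json
{
    "action": "OutputValue",
    "target": "//span[@id='generated-code']",
    "parameters": {
        "source": "XPathText",
        "outputName": "generatedCode"
    }
},
{
    "action": "SendKeys",
    "target": "//input[@id='code-entry']",
    "parameters": {
        "data": "${{ Validation.generatedCode }}"
    }
}
```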
## Validations
Each validation is performed when the `"action"` is set to `Validate`. The kind of validation is provided by the `"type"` key, and by default Quilla currently supports the `URL` and `XPath` types.
Each validation type supports a series of states provided by the `"state"` key, and requires some form of target to specify what is being validated.
### XPath Validation
Each `XPath` validation requires a `"target"` key with an `XPath` that describes an element on the page. The state of this `XPath` will then be validated, according to the valid states.
Some `XPath` validations also require parameters, such as the attribute- and property-based validations of web elements.
The table below describes what the supported states are, and what they are validating
| State | Description | Parameters |
|-------|-------------|------------|
| `Exists` | The specified target exists on the page | None |
| `NotExists` | The specified target does *not* exist on the page | None |
| `Visible` | The specified target is visible on the page | None |
| `NotVisible` | The specified target is not visible on the page | None |
| `TextMatches` | Validates that the element text matches a regular expression pattern | `pattern` |
| `NotTextMatches` | Validates that the element text does not match a regular expression pattern | `pattern` |
| `HasProperty` | Ensures that the element has a property with a matching name | `name` |
| `NotHasProperty` | Ensures that the element does not have a property with a matching name | `name` |
| `HasAttribute` | Ensures that the element has an attribute with a matching name | `name` |
| `NotHasAttribute` | Ensures that the element does not have an attribute with a matching name | `name` |
| `PropertyHasValue` | Ensures that the property has a value matching the one specified | `name`, `value` |
| `NotPropertyHasValue` | Ensures that the property does not have a value matching the one specified | `name`, `value` |
| `AttributeHasValue` | Ensures that the attribute has a value matching the one specified | `name`, `value` |
| `NotAttributeHasValue` | Ensures that the attribute does not have a value matching the one specified | `name`, `value` |
### URL Validation
Each `URL` validation requires a `"target"` key with a string. The precise target will be defined by the `"state"` property.
| State | Description |
|-------|-------------|
| `Equals` | The specified target is exactly equal to the URL of the page at the time the validation runs |
| `NotEquals` | The specified target is not equal to the URL of the page at the time the validation runs |
| `Contains` | The specified target is a substring of the URL of the page at the time the validation runs |
| `NotContains` | The specified target is not a substring of the URL of the page at the time the validation runs |
## Examples
### Searching Bing for puppies
Example written on 2021-06-16
```json
{
    "definitions": {
        "HomePage": {
            "SearchTextBox": "//input[@id='sb_form_q']",
            "SearchButton": "//label[@for='sb_form_go']"
        },
        "ResultsPage": {
            "MainInfoCard": "//div[@class='lite-entcard-main']"
        }
    },
    "targetBrowsers": ["Firefox"],
    "path": "https://www.bing.com",
    "steps": [
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "bing"
        },
        {
            "action": "Validate",
            "type": "XPath",
            "state": "Exists",
            "target": "${{ Definitions.HomePage.SearchTextBox }}"
        },
        {
            "action": "Validate",
            "type": "XPath",
            "state": "Exists",
            "target": "${{ Definitions.HomePage.SearchButton }}"
        },
        {
            "action": "SendKeys",
            "target": "${{ Definitions.HomePage.SearchTextBox }}",
            "parameters": {
                "data": "Puppies"
            }
        },
        {
            "action": "Click",
            "target": "${{ Definitions.HomePage.SearchButton }}"
        },
        {
            "action": "WaitForExistence",
            "target": "${{ Definitions.ResultsPage.MainInfoCard }}",
            "parameters": {
                "timeoutInSeconds": 10
            }
        },
        {
            "action": "Validate",
            "type": "URL",
            "state": "Contains",
            "target": "search?q=Puppies"
        }
    ]
}
```
### Signing In to GitHub
Example written on 2021-06-24
```json
{
"definitions": {
"Username": "validation-example-user",
"Password": "${{ Environment.GITHUB_EXAMPLE_USER_PASSWORD }}",
"WelcomePage": {
"SignInButton": "//div[@class='position-relative mr-3']/a"
},
"SignInPage": {
"UsernameInputField": "//input[@id='login_field']",
"PasswordInputField": "//input[@id='password']",
"SubmitButton": "//input[@class='btn btn-primary btn-block']"
},
"HomePage": {
"UserMenuIcon": "//div[@class='Header-item position-relative mr-0 d-none d-md-flex']",
"YourProfileDropdown": "//details-menu/a[@href='/${{ Definitions.Username }}']"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "github"
},
{
"action": "Validate",
"type": "XPath",
"state": "Exists",
"target": "${{ Definitions.WelcomePage.SignInButton }}"
},
{
"action": "Validate",
"type": "XPath",
"target": "${{ Definitions.WelcomePage.SignInButton }}",
"state": "NotTextMatches",
"parameters": {
"pattern": "[Ss]ign[ -]?[Ii]n"
}
},
{
"action": "Click",
"target": "${{ Definitions.WelcomePage.SignInButton }}"
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "/login"
},
{
"action": "Clear",
"target": "${{ Definitions.SignInPage.UsernameInputField }}"
},
{
"action": "Clear",
"target": "${{ Definitions.SignInPage.PasswordInputField }}"
},
{
"action": "SendKeys",
"target":"${{ Definitions.SignInPage.UsernameInputField }}",
"parameters": {
"data": "${{ Definitions.Username }}"
}
},
{
"action": "SendKeys",
"target": "${{ Definitions.SignInPage.PasswordInputField }}",
"parameters": {
"data": "${{ Definitions.Password }}"
}
},
{
"action": "Validate",
"type": "XPath",
"target": "${{ Definitions.SignInPage.PasswordInputField }}",
"state": "HasAttribute",
"parameters": {
"name": "id"
}
},
{
"action": "Validate",
"type": "XPath",
"target": "${{ Definitions.SignInPage.PasswordInputField }}",
"state": "AttributeHasValue",
"parameters": {
"name": "id",
"value": "password"
}
},
{
"action": "Click",
"target": "${{ Definitions.SignInPage.SubmitButton }}"
},
{
"action": "Validate",
"type": "URL",
"state": "Equals",
"target": "https://github.com/"
},
{
"action": "Click",
"target": "${{ Definitions.HomePage.UserMenuIcon }}"
},
{
"action": "Click",
"target": "${{ Definitions.HomePage.YourProfileDropdown }}"
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "${{ Definitions.Username }}"
}
]
}
```
requirements.txt Normal file
@ -0,0 +1,5 @@
msedge-selenium-tools==3.141.3
pluggy==0.13.1
pydeepmerge==0.3.2
selenium==3.141.0
urllib3==1.26.6
setup.cfg Normal file
@ -0,0 +1,24 @@
[flake8]
exclude = .git,__pycache__,dist,build,debian,*.egg,*.egg-info,*.venv,*.archive
max-line-length=100
filename = *.py
max-complexity = 10
[mypy]
files = src/**/*.py
[tool:pytest]
markers =
unit: Marks a unit test
cli: Marks a CLI test
smoke: An essential test indicating the health of the system
ctx: Marks a context test
util: Marks a utility test
browser: Marks a browser test
firefox: Marks a firefox-specific test
slow: Marks a slow test. Only executes if --run-slow is passed
quilla: Marks tests written to be executed with Quilla
integration: Marks an integration test.
testpaths = tests
addopts = --cov=src --cov-report term-missing -p no:quilla
python_classes = *Tests
setup.py Normal file
@ -0,0 +1,92 @@
from setuptools import setup, find_packages
from itertools import chain
with open('VERSION') as f:
version = f.read().strip()
with open('README.md') as f:
long_description = f.read()
extra_dependencies = {
'tests': [
'flake8',
'mypy',
'pytest',
'pytest-cov',
'pytest-sugar', # Added for better outputs
'pytest-xdist', # Parallelize the tests
],
'docs': [
'sphinx',
'sphinx-rtd-theme',
'sphinx_autodoc_typehints',
'myst_parser',
'sphinx_argparse_cli',
],
'pytest': [ # For the plugin
'pytest'
],
'dev': [
'pre-commit'
],
'release': [
'wheel',
'twine',
'gitchangelog'
]
}
extra_dependencies['all'] = list(
chain.from_iterable(extra_dependencies.values())  # flatten, rather than nesting the lists
)
setup(
name='quilla',
version=version,
description="Declarative UI testing with JSON",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/microsoft/quilla',
python_requires='>=3.8',
packages=find_packages('src'),
package_dir={'': 'src'},
package_data={
'quilla': ['py.typed']
},
include_package_data=True,
install_requires=[
'selenium',
'pluggy',
'msedge-selenium-tools',
'pydeepmerge'
],
tests_require=extra_dependencies['tests'],
extras_require=extra_dependencies,
entry_points={
'console_scripts': ['quilla = quilla:run'],
'pytest11': [
'quilla = pytest_quilla'
]
},
project_urls={
'Issues': 'https://github.com/microsoft/quilla/issues',
'Discussions': 'https://github.com/microsoft/quilla/discussions'
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Typing :: Typed',
]
)
@ -0,0 +1,40 @@
import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from pytest_quilla.pytest_classes import collect_file
def pytest_addoption(parser: Parser):
'''
Adds the quilla INI options for enabling and configuring the plugin
'''
parser.addini(
'use-quilla',
'Enables or disables the use of Quilla for pytest',
type='bool',
default=False
)
parser.addini(
'quilla-prefix',
'Prefix that JSON files should have to be considered quilla test files',
type='string',
default='quilla'
)
def pytest_load_initial_conftests(early_config: Config, parser: Parser):
if not early_config.getini('use-quilla'):
early_config.pluginmanager.set_blocked('quilla')
return
parser.addoption(
"--quilla-opts",
action="store",
default="",
help="Options to be passed through to the quilla runtime for the scenario tests"
)
def pytest_collect_file(parent: pytest.Session, path):
return collect_file(parent, path, parent.config.getini('quilla-prefix'))
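# Example (hypothetical) pytest.ini enabling the plugin:
#
#   [pytest]
#   use-quilla = True
#   quilla-prefix = quilla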
@ -0,0 +1,85 @@
import json
from pathlib import Path
import pytest
from py._path.local import LocalPath
from quilla import (
setup_context
)
from quilla.ui_validation import UIValidation
from quilla.reports.report_summary import ReportSummary
def collect_file(parent: pytest.Session, path: LocalPath, prefix: str):
'''
Collects files if their path ends with .json and starts with the prefix
Args:
parent: The session object performing the collection
path: The path to the file that might be collected
prefix: The prefix for files that should be collected
Returns:
A quilla file object if the path matches, None otherwise
'''
# TODO: change "path" to be "fspath" when pytest 6.3 is released:
# https://docs.pytest.org/en/latest/_modules/_pytest/hookspec.html#pytest_collect_file
if path.ext == ".json" and path.basename.startswith(prefix):
return QuillaFile.from_parent(parent, fspath=path)
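# Example: with the default 'quilla' prefix, a file named 'quilla_login.json'
# is collected, while 'login.json' is not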
class QuillaFile(pytest.File):
def collect(self):
'''
Loads the JSON test data from the path and creates the test instance
Yields:
A quilla item configured from the JSON data
'''
test_data = json.load(self.fspath.open())
yield QuillaItem.from_parent(self, name=self.fspath.purebasename, test_data=test_data)
class QuillaItem(pytest.Item):
def __init__(self, name: str, parent: QuillaFile, test_data: dict):
super(QuillaItem, self).__init__(name, parent)
self.test_data = test_data
markers = test_data.get('markers', [])
for marker in markers:
self.add_marker(marker)
def runtest(self):
'''
Runs the quilla test by creating an isolated context and executing the test
data retrieved from the JSON file.
'''
# Split the pass-through opts string into argv-style tokens; the trailing ''
# stands in for the parser's required json positional argument
ctx = setup_context([*self.config.getoption('--quilla-opts').split(), ''], str(self.config.rootpath))
results = UIValidation.from_dict(ctx, self.test_data).validate_all()
self.results = results
try:
assert results.fails == 0
assert results.critical_failures == 0
except AssertionError:
raise QuillaJsonException(results)
def repr_failure(self, excinfo):
"""Called when self.runtest() raises an exception."""
if isinstance(excinfo.value, QuillaJsonException):
results: ReportSummary = excinfo.value.args[0]
return json.dumps(
results.to_dict(),
indent=4,
sort_keys=True
)
return super().repr_failure(excinfo=excinfo)
def reportinfo(self):
return self.fspath, 0, 'failed test: %s' % self.name
class QuillaJsonException(Exception):
'''
Custom exception for when Quilla files fail
'''
src/quilla/__init__.py Normal file
@ -0,0 +1,173 @@
'''
Entrypoint code for the Quilla module. Deals with setting up the parser and
the runtime context for the application, then executing the rest of the application
'''
import argparse
import sys
import json
from typing import List
from quilla.ui_validation import UIValidation
from quilla.ctx import (
Context,
get_default_context
)
from quilla.plugins import get_plugin_manager
def make_parser() -> argparse.ArgumentParser: # pragma: no cover
'''
Creates the required parser to run UIValidation from the command line
Returns:
A pre-configured ArgParser instance
'''
parser = argparse.ArgumentParser(
prog='quilla',
description='''
Program to provide a report of UI validations given a json representation
of the validations or given the filename containing a json document describing
the validations
'''
)
parser.add_argument(
'-f',
'--file',
dest='is_file',
action='store_true',
help='Whether to treat the argument as raw json or as a file',
)
parser.add_argument(
'json',
help='The json file name or raw json string',
)
parser.add_argument(
'--debug',
action='store_true',
help="Enable debug mode",
)
parser.add_argument(
'--driver-dir',
dest="drivers_path",
action='store',
default='.',
help='The directory where browser drivers are stored',
)
parser.add_argument(
'-P',
'--pretty',
action='store_true',
help='Set this flag to have the output be pretty-printed'
)
parser.add_argument(
'--no-sandbox',
dest='no_sandbox',
action='store_true',
help='Adds \'--no-sandbox\' to the Chrome and Edge browsers. Useful for running in docker containers'
)
parser.add_argument(
'-d',
'--definitions',
action='append',
metavar='file',
help='A file with definitions for the \'Definitions\' context object'
)
return parser
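# Example invocations (file names are hypothetical):
#   quilla '{"steps": []}'
#   quilla -f validation.json --pretty -d definitions.json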
def execute(ctx: Context, json_data: str) -> int:
'''
Runs all defined UI validations from the json, either from file or from raw json text, and
prints all the resulting reports to the console
Args:
json_data: The json string describing the validations
Returns:
Status code for the execution of the UIValidation module, determined by whether or not
there were any reports that were flagged as failed
'''
ui_validation = UIValidation.from_json(ctx, json_data)
ctx.pm.hook.quilla_prevalidate(validation=ui_validation)
reports = ui_validation.validate_all()
ctx.pm.hook.quilla_postvalidate(ctx=ctx, reports=reports)
out = reports.to_dict()
if ctx._context_data['Outputs']:
out['Outputs'] = ctx._context_data['Outputs']
if ctx.pretty:
print(json.dumps(
out,
indent=ctx.pretty_print_indent,
sort_keys=True
))
else:
print(json.dumps(out))
if reports.fails > 0:
return 1
return 0
def setup_context(args: List[str], plugin_root: str = '.') -> Context:
'''
Starts up the plugin manager, creates parser, parses args and sets up the application context
Args:
args: A list of cli options, such as sys.argv[1:]
plugin_root: The directory used by the plugin manager to search for `uiconf.py` files
Returns:
A runtime context configured by the hooks and the args
'''
pm = get_plugin_manager(plugin_root)
parser = make_parser()
pm.hook.quilla_addopts(parser=parser) # type: ignore
args = parser.parse_args(args)
# Set to empty list since argparse defaults to None
if not args.definitions:
args.definitions = []
if not args.is_file:
json_data = args.json
else:
with open(args.json) as f:
json_data = f.read()
ctx = get_default_context(
pm,
args.debug,
args.drivers_path,
args.pretty,
json_data,
args.is_file,
args.no_sandbox,
args.definitions,
)
pm.hook.quilla_configure(ctx=ctx, args=args)
return ctx
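# Example (hypothetical file name): build a configured context from CLI-style
# args without running the validations:
#   ctx = setup_context(['-f', 'validation.json', '--pretty'])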
def run():
'''
Creates the parser object, parses the command-line arguments, and runs them, finishing with the
appropriate exit code.
'''
ctx = setup_context(sys.argv[1:])
exit_code = execute(ctx, ctx.json)
sys.exit(exit_code)
if __name__ == '__main__':
run()
src/quilla/__main__.py Normal file
@ -0,0 +1,5 @@
import quilla # pragma: no cover
if __name__ == "__main__": # pragma: no cover
quilla.run()
@ -0,0 +1,123 @@
from typing import (
Dict,
List,
Optional
)
# from selenium import webdriver
from selenium.webdriver.remote.webdriver import WebDriver
from quilla.ctx import Context
from quilla.browser import drivers
from quilla.common.enums import (
BrowserTargets,
)
from quilla.steps.steps_aggregator import StepsAggregator
from quilla.reports.base_report import BaseReport
from quilla.common.exceptions import InvalidBrowserStateException
BrowserSelector = Dict[BrowserTargets, WebDriver]
class BrowserValidations:
'''
A class that defines the behaviours for validating a set of steps
for a specific browser target.
Args:
ctx: The runtime context for the application
target: An enum specifying the target browser that is desired
url_root: The initial url for the browser to navigate to
steps: An aggregator class that manages all substeps
Attributes:
ctx: The runtime context for the application
'''
driver_selector: BrowserSelector = {
BrowserTargets.FIREFOX: drivers.FirefoxBrowser,
BrowserTargets.CHROME: drivers.ChromeBrowser,
BrowserTargets.EDGE: drivers.EdgeBrowser,
}
def __init__(
self,
ctx: Context,
target: BrowserTargets,
url_root: str,
steps: StepsAggregator,
) -> None:
self._target = target
self._root = url_root
self._steps = steps
self._driver: Optional[WebDriver] = None
self.ctx = ctx
@property
def target(self):
return self._target
@target.setter
def target(self, v: BrowserTargets):
if self._driver is not None:
raise InvalidBrowserStateException(
'Cannot change browser target while a driver is set'
)
self._target = v
def init(self):
'''
Creates the appropriate driver, sets the start URL to the specified root, and
sets all steps to have the appropriate driver
'''
driver: WebDriver = self.driver_selector[self._target](self.ctx)
self._driver = driver
driver.get(self._root)
self._steps.driver = driver # Set the driver for all the steps
self.ctx.driver = driver
def run_steps(self) -> List[BaseReport]:
'''
Executes all stored steps. Pass through method for the steps run_steps method
Returns:
A list of reports generated by the steps
'''
return self._steps.run_steps()
def clean(self):
'''
Closes the browser instance and resets all the step drivers to None state
'''
if self.ctx.close_browser:
try:
self._driver.close()
except Exception:
pass # Browser unable to be closed or is already closed
self._driver = None
self._steps.driver = None
self.ctx.driver = None
def validate(self) -> List[BaseReport]:
'''
Initializes the browser, runs the execution steps, and closes up the browser while
ensuring that any exceptions still allow for cleanup of the browser
Returns:
A list of reports produced by the run_steps function
Raises:
Exception: Any exception produced by the run_steps function
'''
self.init()
reports = []
try:
reports = self.run_steps()
except Exception:
# Let the exception propagate and crash the program, but
# clean up the browser first in the finally block
raise
finally:
self.clean()
return reports
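# Minimal usage sketch (assumes a configured Context `ctx` and a StepsAggregator `steps`):
#   validations = BrowserValidations(ctx, BrowserTargets.FIREFOX, 'https://www.bing.com', steps)
#   reports = validations.validate()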
@ -0,0 +1,64 @@
'''
Module for webdriver subclasses that configure the browsers
'''
from selenium import webdriver
from msedge.selenium_tools import Edge, EdgeOptions
from quilla.ctx import Context
class FirefoxBrowser(webdriver.Firefox):
'''
A class used to configure the Firefox browser driver for use in UIValidation module.
Args:
ctx: The runtime context for the application
'''
def __init__(self, ctx: Context):
options = webdriver.FirefoxOptions()
# If debugging, do not start browser in headless mode
if ctx.run_headless:
options.add_argument('-headless')
super().__init__(options=options)
class ChromeBrowser(webdriver.Chrome):
'''
A class used to configure the Chrome browser driver for use in UIValidation module.
Args:
ctx: The runtime context for the application
'''
def __init__(self, ctx: Context):
options = webdriver.ChromeOptions()
if ctx.no_sandbox:
options.add_argument('--no-sandbox')
if ctx.run_headless:
options.add_argument('--headless')
super().__init__(options=options)
class EdgeBrowser(Edge):
'''
A class used to configure the Edge browser driver for use in UIValidation module.
Args:
ctx: The runtime context for the application
'''
def __init__(self, ctx: Context):
options = EdgeOptions()
options.use_chromium = True
options.set_capability('platform', 'ANY') # Prevent Edge from defaulting to Windows
if ctx.no_sandbox:
options.add_argument('--no-sandbox')
if ctx.run_headless:
options.add_argument('--headless')
super().__init__(options=options)
@ -0,0 +1,5 @@
'''
All shared bits of code that are used throughout much of the UIValidation package
that don't have any better place to be. This encompasses all shared utility code
such as the utility classes, enums, and exceptions used by this application.
'''
src/quilla/common/enums.py Normal file
@ -0,0 +1,101 @@
'''
Module with all required enums for the UIValidation module
'''
from enum import Enum
# Enums
class ValidationTypes(Enum):
'''
The currently-supported types of validation allowed
'''
XPATH = 'XPath'
URL = 'URL'
class ValidationStates(Enum):
'''
Base class for validation state enums
'''
class XPathValidationStates(ValidationStates):
'''
States that the XPath validation class recognizes
'''
EXISTS = 'Exists'
NOT_EXISTS = 'NotExists'
VISIBLE = 'Visible'
NOT_VISIBLE = 'NotVisible'
TEXT_MATCHES = 'TextMatches'
NOT_TEXT_MATCHES = 'NotTextMatches'
HAS_PROPERTY = 'HasProperty'
NOT_HAS_PROPERTY = 'NotHasProperty'
PROPERTY_HAS_VALUE = 'PropertyHasValue'
NOT_PROPERTY_HAS_VALUE = 'NotPropertyHasValue'
HAS_ATTRIBUTE = 'HasAttribute'
NOT_HAS_ATTRIBUTE = 'NotHasAttribute'
ATTRIBUTE_HAS_VALUE = 'AttributeHasValue'
NOT_ATTRIBUTE_HAS_VALUE = 'NotAttributeHasValue'
class URLValidationStates(ValidationStates):
'''
States that the URL validation class recognizes
'''
CONTAINS = 'Contains'
NOT_CONTAINS = 'NotContains'
EQUALS = 'Equals'
NOT_EQUALS = 'NotEquals'
MATCHES = 'Matches'
NOT_MATCHES = 'NotMatches'
class UITestActions(Enum):
'''
All supported UI test actions
'''
CLICK = 'Click'
CLEAR = 'Clear'
SEND_KEYS = 'SendKeys'
WAIT_FOR_EXISTENCE = 'WaitForExistence'
WAIT_FOR_VISIBILITY = 'WaitForVisibility'
NAVIGATE_TO = 'NavigateTo'
VALIDATE = 'Validate'
REFRESH = "Refresh"
ADD_COOKIES = "AddCookies"
SET_COOKIES = "SetCookies"
REMOVE_COOKIE = "RemoveCookie"
CLEAR_COOKIES = "ClearCookies"
NAVIGATE_FORWARD = "NavigateForward"
NAVIGATE_BACK = "NavigateBack"
SET_BROWSER_SIZE = "SetBrowserSize"
HOVER = "Hover"
OUTPUT_VALUE = "OutputValue"
class ReportType(Enum):
'''
All the currently supported report types
'''
VALIDATION = "Validation"
STEP_FAILURE = "StepFailure"
class BrowserTargets(Enum):
'''
All the currently supported browser targets
'''
FIREFOX = "Firefox"
CHROME = "Chrome"
EDGE = "Edge"
class OutputSources(Enum):
'''
Supported sources for the OutputValue action
'''
LITERAL = 'Literal'
XPATH_TEXT = 'XPathText'
XPATH_PROPERTY = 'XPathProperty'
@ -0,0 +1,57 @@
'''
Custom exceptions for the UIValidation module
'''
from enum import Enum
from typing import Type
# Exceptions
class UIValidationException(Exception):
'''
Base exception for all UIValidation module exceptions
'''
class NoDriverException(UIValidationException):
'''
Exception for when steps are called to action without being bound to a driver
'''
def __init__(self):
super().__init__("No driver currently bound")
class FailedStepException(UIValidationException):
'''
Exception for when there is a failed step in the chain
'''
class EnumValueNotFoundException(UIValidationException):
'''
Exception for when an enum value cannot be found by string
value
'''
def __init__(self, str_value: str, enum: Type[Enum]):
super().__init__(f'Cannot find {enum} with value {str_value}')
class InvalidContextExpressionException(UIValidationException):
'''
Exception caused by the context expression syntax being invalid
'''
class InvalidOutputName(UIValidationException):
'''
Exception caused by an invalid output name
'''
class InvalidBrowserStateException(UIValidationException):
'''
Exception caused by attempting to change the browser target
while the browser is currently open
'''
@ -0,0 +1,85 @@
'''
A module containing an assortment of utility classes that don't really fit in anywhere else.
These classes define shared behaviour for specific actions that some other classes require, such
as checking for the actual existence of a valid driver when attempting to retrieve the driver,
or alternatively attempting to resolve a valid enum given its type and the value associated with it.
'''
from typing import (
Type,
Optional,
TypeVar
)
from enum import Enum
from selenium.webdriver.remote.webdriver import WebDriver
from quilla.common.exceptions import (
NoDriverException,
EnumValueNotFoundException
)
T = TypeVar('T', bound=Enum)
class DriverHolder:
'''
Utility class to define shared behaviour for classes that contain
a driver property
'''
def __init__(self, driver: Optional[WebDriver] = None):
self._driver = driver
@property
def driver(self) -> WebDriver:
'''
The webdriver attached to this object
Raises:
NoDriverException: if the internal driver is currently None
'''
if self._driver is None:
raise NoDriverException
return self._driver
@driver.setter
def driver(self, new_driver: Optional[WebDriver]) -> Optional[WebDriver]:
self._driver = new_driver
return new_driver
class EnumResolver:
'''
Utility class to define shared behaviour for classes that need to
resolve string values into appropriate enums
'''
@classmethod
def _name_to_enum(cls, name: str, enum: Type[T], ctx=None) -> T:  # ctx type omitted due to circular import
'''
Converts a string value into the appropriate enum type.
Useful for inner representations of the data so we're not just working with strings
everywhere
Args:
ctx: the runtime context of the application
name: the string value to resolve into an enum
enum: the parent Enum class that contains an entry matching the name argument
Raises:
EnumValueNotFoundException: if this resolver fails to resolve an appropriate
enum value
'''
for enum_obj in enum:
if enum_obj.value == name:
return enum_obj
if ctx is not None:
resolved_plugin_value = ctx.pm.hook.quilla_resolve_enum_from_name(name=name, enum=enum)
if resolved_plugin_value is not None:
return resolved_plugin_value
raise EnumValueNotFoundException(name, enum)
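# Example: EnumResolver._name_to_enum('Firefox', BrowserTargets)
# resolves to BrowserTargets.FIREFOX (BrowserTargets lives in quilla.common.enums)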
src/quilla/ctx.py Normal file
@ -0,0 +1,307 @@
import os
import re
from functools import lru_cache
from typing import (
Optional,
List,
)
from pathlib import Path
import json
from pluggy import PluginManager
import pydeepmerge as pdm
from quilla.common.exceptions import (
InvalidContextExpressionException,
InvalidOutputName,
)
from quilla.common.utils import DriverHolder
class Context(DriverHolder):
'''
Class defining configurations for the runtime context. This object
should not be created directly but retrieved with "get_default_context"
Args:
debug: Whether the configurations should be run as debug mode.
drivers_path: The directory where the different browser drivers are stored
definitions: A list of definition file paths whose contents are loaded into the 'Definitions' context data
Attributes:
pm: A PluginManager instance with all hooks already loaded
suppress_exceptions: Whether to suppress exceptions by generating reports or
to crash the application on exception
run_headless: Whether the browsers should be instructed to run headless
close_browser: Whether the cleanup process should close browsers or leave the
session open
pretty: If the output should be pretty-printed
json_data: The json describing the validations
is_file: Whether a file was originally passed in or if raw json was passed in
no_sandbox: Whether to pass the '--no-sandbox' arg to Chrome and Edge
'''
default_context: Optional["Context"] = None
_expression_regex = re.compile(r'\${{(.*)}}')
_context_obj_expression = re.compile(
# Used on the inside of the _expression_regex to
# find context objects embedded into the
# context expression regex
r'([a-zA-Z][a-zA-Z0-9_]+)(\.[a-zA-Z_][a-zA-Z0-9_]+)+'
)
_output_browser: str = 'Firefox'
pretty_print_indent: int = 4
def __init__(
self,
plugin_manager: PluginManager,
debug: bool = False,
drivers_path: str = '.',
pretty: bool = False,
json_data: str = '',
is_file: bool = False,
no_sandbox: bool = False,
definitions: List[str] = [],
):
super().__init__()
self.pm = plugin_manager
self._path = os.environ['PATH'] # snapshot the path
self.is_debug = debug
self.pretty = pretty
self.json = json_data
self.is_file = is_file
self.no_sandbox = no_sandbox
path = Path(drivers_path)
self.drivers_path = str(path.resolve())
self._context_data = {'Validation': {}, 'Outputs': {}, 'Definitions': {}}
self._load_definition_files(definitions)
@property
def is_debug(self) -> bool:
'''
A set of debug configurations. Will return True if 'debug' was originally passed or
this property was set, but the individual flags (suppress_exceptions, run_headless,
close_browser) can be edited for a more fine-tuned debugging experience
'''
return self._debug
@is_debug.setter
def is_debug(self, v: bool):
self._debug = v
self.suppress_exceptions: bool = not v
self.run_headless: bool = not v
self.close_browser: bool = not v
@property
def drivers_path(self) -> str:
'''
The path where the drivers will be stored. Setting this property will add it to
the PATH variable, so if the drivers are already in the PATH this can be omitted.
'''
return self._drivers_path
@drivers_path.setter
def drivers_path(self, v: str) -> str:
path = Path(v)
self._drivers_path = str(path.resolve())
self._set_path()
return v
def _set_path(self):
os.environ['PATH'] = f"{self._path}:{self._drivers_path}"
@lru_cache
def perform_replacements(self, text: str) -> str:
'''
Extracts any relevant context expressions from the text and attempts to
making suitable replacements for the context objects
Args:
text: Any string that supports context expressions
Returns:
The resulting string after executing the context expression
Examples:
Assuming the 'Validation' context data contains {'name': 'examplesvc'}:
>>> ctx.perform_replacements('/api/${{ Validation.name }}/get')
'/api/examplesvc/get'
'''
while (expression_match := self._expression_regex.search(text)) is not None:
expression = expression_match.group(1).strip()
processed = self._process_objects(expression)
text = (
text[:expression_match.start()] +
f'{processed}' +
text[expression_match.end():]
)
return text
def _escape_quotes(self, text: str) -> str:
return text.replace("'", "\\'")
def _process_objects(self, expression: str) -> str:
'''
Performs context object replacement to ensure that the returned string is ready for
the eval that will occur in perform_replacements
Args:
expression: a string that matches the _expression_regex regular expression
Returns:
an eval-ready string wherein all the valid context objects have been replaced
with the appropriate values from their respective sources
'''
while (object_match := self._context_obj_expression.search(expression)) is not None:
object_expression = object_match.group(0) # Grab the full expression
root, *path = object_expression.split('.')
if root == 'Environment':
repl_value = self._escape_quotes(os.environ.get('.'.join(path), ''))
elif root == 'Validation' or root == 'Definitions':
data = self._context_data[root]
data = self._walk_data_tree(data, path, object_expression)
repl_value = data
elif self.pm is not None:
# Pass it to the defined hooks
hook_results = self.pm.hook.quilla_context_obj(ctx=self, root=root, path=tuple(path)) # type: ignore
# Hook results will always be either size 1 or 0
if len(hook_results) == 0:
repl_value = ''
else:
repl_value = hook_results[0]
expression = (
expression[:object_match.start()] +
f'{repl_value}' +
expression[object_match.end():]
)
return expression
def _walk_data_tree(self, data, exp, object_expression):
for entry in exp:
data = data.get(entry, None)
if data is None:
raise InvalidContextExpressionException(
f'\'{object_expression}\' does not exist'
)
return data
def create_output(self, output_name: str, value: str):
'''
Creates an output on the context object to be returned by the validation
module if the browser is the supported output browser, and sets the value
in the Validation context object. Setting the value in the
Validation context happens regardless of the browser type, since further
steps on the validation chain could need this specific value and the order in
which browsers are executed is dependent on the order that the user gave
in the validation json.
Args:
output_name: The name (ID) of this specific output, to be referred to
with ${{ Validation.<output_name> }}. This does support chaining of the
namespace to create a more dictionary-like structure
value: The value that the name will be associated with
'''
if self.driver.name.strip().capitalize() == self._output_browser:
self._deep_insert('Outputs', output_name, value)
self._deep_insert('Validation', output_name, value)
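# Example (hypothetical output name): after a step calls
#   ctx.create_output('login.token', 'abc123')
# later steps can reference the value as ${{ Validation.login.token }}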
def _deep_insert(self, data_store: str, value_name: str, value: str):
store = self._context_data[data_store]
*name_iter, name = value_name.split('.')
for namespace in name_iter:
try:
new_store = store.get(namespace, {}) # type: ignore
store[namespace] = new_store
store = new_store
except Exception:
raise InvalidOutputName(
f'The name \'{value_name}.{name}\' has already been used or the '
'namespace is invalid'
)
if isinstance(store, str):
raise InvalidOutputName(
f'The name \'{value_name}.{name}\' has already been used or the '
'namespace is invalid'
)
store[name] = value # type: ignore
def _load_definition_files(self, definition_files: List[str]):
'''
Given a list of definition file names, loads all of them into the context data store
'''
for definition_file in definition_files:
with open(definition_file) as fp:
data_dict = json.load(fp)
self._load_definitions(data_dict)
def load_definitions(self, definitions_dict: dict):
'''
Loads the given dictionary into the context data, merging the dictionaries and preferring
the newer configurations wherever there is a conflict
Args:
definitions_dict: A dictionary containing all the definitions. Definitions
are strings saved either in an external file or in the 'definitions'
object of the validation json that works effectively as a macro, allowing
test writers to use declarative names for XPaths
'''
self._context_data['Definitions'] = pdm.deep_merge(
self._context_data['Definitions'],
definitions_dict
)
def get_default_context(
plugin_manager: PluginManager,
debug: bool = False,
drivers_path: str = '.',
pretty: bool = False,
json: str = '',
is_file: bool = False,
no_sandbox: bool = False,
definitions: List[str] = [],
recreate_context: bool = False,
) -> Context:
'''
Gets the default context, creating a new one if necessary.
If a context object already exists and recreate_context is False, all of the
other arguments passed into this function are ignored.
Args:
plugin_manager: An instance of the plugin manager class to attach to the context
debug: Whether debug configurations should be enabled
drivers_path: The directory holding all the required drivers
pretty: Whether the output json should be pretty-printed
json: The json data describing the validations
is_file: Whether the json argument is the name of a file rather than
a raw json string
no_sandbox: Specifies that Chrome and Edge should start with the --no-sandbox flag
definitions: A list of file names that contain definitions for Quilla
recreate_context: Whether a new context object should be created or not
Returns:
Application context shared for the entire application
'''
if Context.default_context is None or recreate_context:
Context.default_context = Context(
plugin_manager,
debug,
drivers_path,
pretty,
json,
is_file,
no_sandbox,
definitions,
)
return Context.default_context
src/quilla/hookspecs.py Normal file
@ -0,0 +1,132 @@
from enum import Enum
from typing import (
Tuple,
Optional,
Dict,
Type,
TypeVar,
)
from argparse import (
ArgumentParser,
Namespace
)
import pluggy
from quilla.ctx import Context
from quilla.common.enums import UITestActions
from quilla.steps.base_steps import BaseStepFactory
from quilla.reports.report_summary import ReportSummary
from quilla.ui_validation import UIValidation
hookspec = pluggy.HookspecMarker('quilla')
StepFactorySelector = Dict[UITestActions, Type[BaseStepFactory]]
T = TypeVar('T', bound=Enum)
@hookspec(firstresult=True)
def quilla_context_obj(ctx: Context, root: str, path: Tuple[str, ...]) -> Optional[str]:
'''
A hook to allow plugins to resolve a context object given its root
and a path. All plugins that implement this hook *must* return None if they cannot
resolve the context object.
It is not possible to override the default context object handlers
Args:
ctx: The runtime context for the application
root: The name of the context object, which is expressed as the root
of a dot-separated path in the validation files
path: The remainder of the context object path, where data is being
retrieved from
Returns:
the data stored at the context object if existing, None otherwise
'''
@hookspec
def quilla_addopts(parser: ArgumentParser):
'''
A hook to allow plugins to add additional arguments to the argument parser.
This can be used if a plugin requires additional parameters or data in some way.
This is called after the initial argument parser setup
Args:
parser: The argparse Argument Parser instance used by the application
'''
@hookspec
def quilla_configure(ctx: Context, args: Namespace):
'''
A hook to allow plugins to modify the context object, either changing its data
or adding data to it.
This is called after the initial setup of the context object
Args:
ctx: The runtime context for the application
args: Parsed CLI args, in case they are needed
'''
@hookspec
def quilla_prevalidate(validation: UIValidation):
'''
A hook called immediately before the validations attempt to be resolved
(i.e. before `validations.validate_all()` is called)
Args:
validation: The collected validations from the json passed to
the application
'''
@hookspec
def quilla_postvalidate(ctx: Context, reports: ReportSummary):
'''
A hook called immediately after all validations are executed and the full
ReportSummary is generated
Args:
ctx: The runtime context for the application
reports: An object capturing all generated reports and giving summary data
'''
@hookspec
def quilla_step_factory_selector(selector: StepFactorySelector):
'''
A hook called immediately before resolving the step factory for a given step definition.
This is used to register new step factories for custom step objects.
Most custom steps should just add themselves to the `quilla_step_selector` hook, but if
a custom step requires complex logic it might be beneficial to register a factory to
have more fine-grained control over the logic
Args:
selector: The factory selector dictionary.
'''
@hookspec(firstresult=True)
def quilla_resolve_enum_from_name(name: str, enum: Type[T]) -> Optional[T]:
'''
A hook called when a value specified by the quilla test should be resolved to an
enum, but no enum has been found. This is to allow plugins to register custom
enum values for quilla, such as new step actions, validation types, validation states,
output sources, etc.
Args:
name: the string value specified in the quilla test
enum: The enum subclass type that is being attempted to be resolved. This
should give an indication as to what is being resolved. For example,
UITestActions is the enum type being resolved for the 'actions' field.
Returns:
The resolved enum, if it can be resolved. None if the plugin can't resolve
the value.
'''
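# A minimal uiconf.py plugin sketch (hypothetical names), picked up from the
# plugin root directory by the plugin manager:
#
#   def quilla_configure(ctx, args):
#       ctx.load_definitions({'HomePage': {'SearchBox': "//input[@id='q']"}})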
src/quilla/plugins.py Normal file
@ -0,0 +1,112 @@
import pkg_resources
from importlib.machinery import SourceFileLoader
from pathlib import Path
import pluggy
from quilla import hookspecs
_hookimpl = pluggy.HookimplMarker('quilla')
class _DummyHooks:
'''
A class of dummy hook implementations that do nothing
'''
@_hookimpl
def quilla_addopts():
pass
@_hookimpl
def quilla_context_obj():
pass
@_hookimpl
def quilla_configure():
pass
@_hookimpl
def quilla_prevalidate():
pass
@_hookimpl
def quilla_postvalidate():
pass
@_hookimpl
def quilla_step_factory_selector():
pass
def _get_uiconf_plugins(pm: pluggy.PluginManager, root: Path):
'''
Attempts to load a uiconf.py file from the root directory, and adds all plugin
hooks defined in that file into the plugin manager
Args:
pm: The plugin manager
root: The directory in which to search for the conftest file
'''
uiconf_file = root / 'uiconf.py'
if not uiconf_file.exists() or not uiconf_file.is_file():
return # No plugin file to load
abs_path = uiconf_file.expanduser().resolve()
uiconf_module = SourceFileLoader('uiconf', str(abs_path)).load_module()
_load_hooks_from_module(pm, uiconf_module)
def _load_hooks_from_module(pm: pluggy.PluginManager, module):
'''
Load a module into the given plugin manager object by finding all
methods in the module that start with the `quilla_` prefix
and wrapping them in a _hookimpl so they are picked up by `pluggy`
Args:
pm: The plugin manager
module: The loaded module instance
'''
hooks = filter(lambda name: name.startswith('quilla_'), dir(hookspecs))
for hook in hooks:
if hasattr(module, hook):
hook_function = getattr(module, hook)
hook_function = _hookimpl(hook_function)
setattr(module, hook, hook_function)
pm.register(module)
def _load_entrypoint_plugins(pm: pluggy.PluginManager):
for entry_point in pkg_resources.iter_entry_points('QuillaPlugins'):
try:
entry_point.require()
_load_hooks_from_module(pm, entry_point.load())
except pkg_resources.DistributionNotFound:
# Skips package if it cannot load it
pass
def get_plugin_manager(path: str) -> pluggy.PluginManager:
'''
Creates and configures a plugin manager by loading all the plugins defined
through entrypoints or through a `uiconf.py` file found at the `path` location
Args:
path: the directory in which the `uiconf.py` will be found
Returns:
a configured PluginManager instance with all plugins already loaded
'''
pm = pluggy.PluginManager('quilla')
pm.register(_DummyHooks)
_load_entrypoint_plugins(pm)
_get_uiconf_plugins(pm, Path(path))
return pm
src/quilla/py.typed Normal file
@ -0,0 +1,76 @@
import json
from abc import (
abstractmethod,
)
from typing import Dict
from quilla.common.utils import EnumResolver
from quilla.common.enums import (
ReportType,
UITestActions,
)
class BaseReport(EnumResolver):
'''
Data class for producing reports on various steps performed
Args:
report_type: An enum specifying the kind of report
browser: The name of the browser
action: An enum specifying the kind of action that was taken
msg: A string giving further context to the report
'''
def __init__(self, report_type: ReportType, browser: str, action: UITestActions, msg: str = ""):
self.browser: str = browser
self.action: UITestActions = action
self.msg: str = msg
self.report_type: ReportType = report_type
@classmethod
@abstractmethod
def from_dict(cls, report: Dict[str, Dict[str, str]]) -> "BaseReport":
'''
Converts a dictionary report into a valid Report object
Args:
report: a dictionary that describes a report
'''
@abstractmethod
def to_dict(self):
'''
Converts the Report object into a dictionary representation
'''
def to_json(self) -> str:
'''
Returns:
a json representation of the object. Good for passing reports to different applications
'''
report = self.to_dict()
return json.dumps(report)
@classmethod
def from_json(cls, report_json: str) -> "BaseReport":
'''
Loads a valid json string and attempts to convert it into a Report object
Returns:
a report of the appropriate type
'''
st = json.loads(report_json)
return cls.from_dict(st) # type: ignore
@classmethod
def from_file(cls, fp) -> "BaseReport":
'''
Converts a fp (a file-like .read() supporting object) containing a json document
into a Report object
Returns:
a report of the appropriate type
'''
st = json.load(fp)
return cls.from_dict(st) # type: ignore
@ -0,0 +1,184 @@
import json
from typing import (
Dict,
Type,
List,
Optional,
Callable
)
from quilla.common.enums import ReportType
from quilla.reports.base_report import BaseReport
from quilla.reports.validation_report import ValidationReport
from quilla.reports.step_failure_report import StepFailureReport
class ReportSummary:
'''
A class to describe a series of report objects, as well as manipulating them for test purposes.
Args:
reports: A list of reports to produce a summary of
Attributes:
reports: A list of reports used to produce a summary
successes: The number of reports that are described as successful
fails: The number of reports that are not described as successful
critical_failures: The number of reports representing critical (i.e. unrecoverable)
failures. This will be produced at any step if it causes an exception.
filter_by: A declarative way to filter through the various reports
'''
selector: Dict[str, Type[BaseReport]] = {
'validationReport': ValidationReport,
'stepFailureReport': StepFailureReport,
}
def __init__(self, reports: Optional[List[BaseReport]] = None):
self.reports = reports if reports is not None else []  # avoid a shared mutable default
self.successes = 0
self.fails = 0
self.critical_failures = 0
self.filter_by = ReportSummary.FilterTypes(self)
self._summarize()
def to_dict(self):
'''
Returns:
a dictionary representation of the summary report
'''
return {
'reportSummary': {
'total_reports': len(self.reports),
'successes': self.successes,
'failures': self.fails,
'critical_failures': self.critical_failures,
'reports': [
report.to_dict() for report in self.reports
]
}
}
def to_json(self) -> str:
'''
Returns:
a json string representation of the summary report
'''
return json.dumps(self.to_dict())
@classmethod
def from_dict(cls, summary_dict):
'''
Loads a ReportSummary object that is represented as a dictionary. It does not trust the
metadata that is in the report, and will regenerate the metadata itself.
'''
reports = summary_dict['reportSummary']['reports']
obj_reports = []
for report in reports:
# Each report has a report tag as the root of the json document
report_type = list(report.keys())[0]
report_object = cls.selector[report_type]
obj_reports.append(report_object.from_dict(report))
return ReportSummary(obj_reports)
@classmethod
def from_json(cls, summary_json):
'''
Loads a ReportSummary object that is represented as a valid json string. Calls from_dict
with the default json loader
'''
return cls.from_dict(json.loads(summary_json))
def _summarize(self):
'''
Performs the summary operation over the stored reports
'''
for report in self.reports:
if report.report_type == ReportType.VALIDATION:
if report.success:
self.successes += 1
else:
self.fails += 1
elif report.report_type == ReportType.STEP_FAILURE:
self.fails += 1
self.critical_failures += 1
class FilterTypes:
'''
Inner class used to provide declarative filtering syntax for ReportSummary objects.
For example, to filter by only successful reports you would call
`reports.filter_by.successful()`
'''
def __init__(self, summary: "ReportSummary"):
self._summary = summary
def _filter(self, condition: Callable[[BaseReport], bool]) -> "ReportSummary":
'''
Returns a new summary with only reports that match the condition passed as
a lambda function parameter
'''
reports = self._summary.reports.copy()
filtered_reports = filter(condition, reports)
return ReportSummary(list(filtered_reports))
def state(self, state: str) -> "ReportSummary":
'''
Returns:
a new summary with only reports that have a state matching the one
given by the state parameter
'''
return self._filter(
lambda x: isinstance(x, ValidationReport) and x.state.lower() == state.lower()
)
def browser(self, browser: str) -> "ReportSummary":
'''
Returns:
a new summary with only reports that have a browser matching the one
given by the browser parameter
'''
return self._filter(lambda x: x.browser.lower() == browser.lower())
def successful(self) -> "ReportSummary":
'''
Returns:
a new summary with only the reports that produced a success
'''
return self._filter(lambda x: isinstance(x, ValidationReport) and x.success)
def failure(self) -> "ReportSummary":
'''
Returns:
a new summary with only the reports that produced a failure
'''
return self._filter(lambda x: isinstance(x, ValidationReport) and not x.success)
def type(self, validation_type: str) -> "ReportSummary":
'''
Returns:
a new summary with only reports that have a type matching the one
given by the type parameter
'''
return self._filter(
lambda x:
isinstance(x, ValidationReport) and
x.validation_type.lower() == validation_type.lower()
)
def target(self, target: str) -> "ReportSummary":
'''
Returns:
a new summary with only reports that have a target matching the one
given by the target parameter
'''
return self._filter(
lambda x: isinstance(x, ValidationReport) and x.target.lower() == target.lower()
)
def critical_failure(self) -> "ReportSummary":
'''
Returns:
a new summary with only reports that constitute a critical failure
'''
return self._filter(lambda x: x.report_type == ReportType.STEP_FAILURE)
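# Example: each filter returns a new ReportSummary, so filters can be chained:
#   firefox_failures = summary.filter_by.browser('Firefox').failure()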
@ -0,0 +1,64 @@
from typing import Union
from quilla.common.enums import (
UITestActions,
ReportType,
)
from quilla.reports.base_report import BaseReport
class StepFailureReport(BaseReport):
'''
Data class for specifying the cause of a step failing in a critical, usually unexpected way.
Args:
exception: Either an actual exception class, or a string that describes the failure
browser: The name of the browser this action failed on
action: An enum describing the action that was taken
step_index: The index of the step that failed, for debugging purposes
Attributes:
index: The index of the step that failed, for debugging purposes
'''
def __init__(
self,
exception: Union[Exception, str],
browser: str,
action: UITestActions,
step_index: int
):
super().__init__(ReportType.STEP_FAILURE, browser, action, repr(exception))
self.index = step_index
def to_dict(self):
'''
Converts this report into a dictionary object
Returns:
a dictionary containing the representation of this object
'''
return {
'stepFailureReport': {
'action': self.action.value,
'targetBrowser': self.browser,
'passed': False,
'stepIndex': self.index,
'msg': self.msg,
}
}
@classmethod
def from_dict(cls, report_dict):
'''
Converts a dictionary representing this object into a proper StepFailureReport object
Returns:
the appropriate StepFailureReport object
'''
report = report_dict['stepFailureReport']
return StepFailureReport(
report['msg'],
report['targetBrowser'],
cls._name_to_enum(report['action'], UITestActions),
report['stepIndex']
)
@ -0,0 +1,87 @@
from quilla.common.enums import (
ReportType,
UITestActions,
)
from quilla.reports.base_report import BaseReport
class ValidationReport(BaseReport):
'''
Data class for producing reports on the results of the validations
Args:
validation_type: The string representation of the type of validation performed
target: The validation target
state: The desired state used for validation
browser_name: The name of the browser that the validation was performed on
success: Whether the validation passed or not
msg: An optional string adding further context to the report
Attributes:
validation_type: The string representation of the type of validation performed
target: The validation target
state: The desired state used for validation
success: Whether the validation passed or not
msg: An optional string adding further context to the report
'''
def __init__(
self,
validation_type: str,
target: str,
state: str,
browser_name: str,
success: bool,
msg: str = ""
):
super().__init__(
ReportType.VALIDATION,
browser_name,
UITestActions.VALIDATE,
msg
)
self.validation_type = validation_type
self.target = target
self.state = state
self.success = success
@classmethod
def from_dict(cls, report) -> "ValidationReport":
'''
Converts a dictionary into a ValidationReport object
Args:
report: a dictionary representation of the validation report
'''
params = report['validationReport']
msg = ""
if 'msg' in params:
msg = params['msg']
return ValidationReport(
validation_type=params['type'],
target=params['target'],
state=params['state'],
browser_name=params['targetBrowser'],
success=params['passed'],
msg=msg
)
def to_dict(self):
'''
Returns a dictionary representation of the object
'''
report = {
"validationReport": {
"action": self.action.value,
"type": self.validation_type,
"target": self.target,
"state": self.state,
"targetBrowser": self.browser,
"passed": self.success,
}
}
if self.msg:
report['validationReport']['msg'] = self.msg
return report
@ -0,0 +1,227 @@
from typing import (
Optional,
Callable,
Dict,
Any,
)
from abc import abstractmethod
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.by import By
from quilla.ctx import Context
from quilla.reports.base_report import BaseReport
from quilla.reports.validation_report import ValidationReport
from quilla.common.utils import (
DriverHolder,
EnumResolver
)
from quilla.common.enums import (
UITestActions,
ValidationTypes,
ValidationStates,
)
from quilla.common.exceptions import FailedStepException
class BaseStep(DriverHolder, EnumResolver):
'''
Base class for all step objects
Args:
ctx: The runtime context for the application
action_type: Enum defining which of the supported actions this class represents
driver: An optional argument to allow the driver to be bound at object creation.
Attributes:
ctx: The runtime context for the application
action: Enum defining which of the supported actions this class represents
'''
def __init__(
self,
ctx: Context,
action_type: UITestActions,
target: Optional[str] = None,
parameters: Optional[dict] = None,
driver: Optional[WebDriver] = None
):
self.action = action_type
self.ctx = ctx
self.target = target
self.parameters = parameters
super().__init__(driver)
@abstractmethod
def perform(self) -> Optional[BaseReport]: # pragma: no cover
'''
Runs the necessary action. If the action is a Validate action, will return a
ValidationReport
Returns:
A report produced by the step, or None if no report is required
'''
pass
@abstractmethod
def copy(self) -> "BaseStep":
'''
Returns a copy of the current Step object
'''
@property
def target(self):
'''
The target for an action, if applicable. Will resolve all context
expressions before being returned
'''
if self._target is not None:
return self.ctx.perform_replacements(self._target)
@target.setter
def target(self, val: str) -> str:
self._target = val
return val
@property
def parameters(self):
'''
Parameters for an action, if applicable. Will resolve all context
expressions before being returned
'''
if self._parameters is not None:
return self._deep_replace(self.ctx, self._parameters.copy())
@parameters.setter
def parameters(self, val: dict) -> dict:
self._parameters = val
return val
def _deep_replace(self, ctx: Context, params: Dict[str, Any]):
for key, value in params.items():
if isinstance(value, str):
params[key] = ctx.perform_replacements(value)
elif isinstance(value, Dict):
params[key] = self._deep_replace(ctx, value)
# (TODO): Add list support to deep replacement
return params
def _verify_parameters(self, *parameters: str):
for parameter in parameters:
if parameter not in self.parameters: # type: ignore
raise FailedStepException(
f'"{parameter}" parameter not specified for "{self.action.value}" action'
)
def _verify_target(self):
if self.target is None:
raise FailedStepException(f'No specified target for "{self.action.value}" action')
@property
def element(self) -> WebElement:
'''
Located WebElement instance
'''
return self.driver.find_element(*self.locator)
@property
def locator(self):
'''
Locator for selenium to find web elements
'''
return (By.XPATH, self.target)
class BaseStepFactory:
@classmethod
@abstractmethod
def from_dict(cls, ctx: Context, step: Dict, driver: Optional[WebDriver] = None) -> BaseStep:
'''
Given a context, step dictionary, and optionally a driver, return an appropriate subclass
of BaseStep
Args:
ctx: The runtime context of the application
step: A dictionary defining the step
driver: Optionally, a driver to bind to the resulting step
Returns:
The resulting step object
'''
class BaseValidation(BaseStep):
'''
Base validation class with shared functionality for all validations
Args:
ctx: The runtime context for the application
type_: An enum describing the type of supported validation
target: The general target that this validation will seek. What it means is
specific to each subclass of this class
state: An enum describing the desired state of this validation. The specific
enum is a subclass of the ValidationStates class specific to the subclass
of BaseValidation being used
selector: A dictionary that maps the state enum to the appropriate function
'''
def __init__(
self,
ctx: Context,
type_: ValidationTypes,
target: str,
state: ValidationStates,
selector: Dict[ValidationStates, Callable[[], ValidationReport]],
parameters: Dict,
driver: Optional[WebDriver] = None,
) -> None:
super().__init__(ctx, UITestActions.VALIDATE, target=target, parameters=parameters, driver=driver)
self._type = type_
self._state = state
self._driver = driver
self._selector = selector
self._report: Optional[ValidationReport] = None
def copy(self) -> "BaseValidation":
# All classes derived from BaseValidation share this constructor signature
return self.__class__( # type: ignore
self.ctx, # type: ignore
self._target, # type: ignore
self._state, # type: ignore
self._parameters,
self._driver # type: ignore
)
def perform(self) -> ValidationReport:
'''
Performs the correct action based on what is defined within the selector,
and returns the resulting report produced.
Returns:
A report summarizing the results of the executed validation
'''
action_function = self._selector[self._state]
self._report = action_function()
return self._report
def _create_report(self, success: bool, msg: str = "") -> ValidationReport:
'''
Creates a new validation report. Used to simplify the common shared
behaviour that all validation reports require
Args:
success: Value representing the successfulness of the validation. True if
the validation passed, False otherwise
msg: An optional string message to be included in the report
Returns:
A report summarizing the results of the executed validation
'''
return ValidationReport(
self._type.value,
self._target,
self._state.value,
self.driver.name.capitalize(),
success=success,
msg=msg
)
src/quilla/steps/outputs.py Normal file
@ -0,0 +1,100 @@
from typing import (
Optional,
Dict,
Any
)
from selenium.webdriver.remote.webdriver import WebDriver
from quilla.common.enums import UITestActions
from quilla.ctx import Context
from quilla.common.enums import (
OutputSources
)
from quilla.steps.base_steps import (
BaseStep,
BaseStepFactory
)
class OutputValueStep(BaseStep, BaseStepFactory):
required_params = [
'target',
'parameters'
]
@classmethod
def from_dict(
cls,
ctx: Context,
action_dict,
driver: Optional[WebDriver] = None
) -> "OutputValueStep":
'''
Factory method to extract needed parameters from a dictionary
'''
for item in cls.required_params:
if item not in action_dict:
raise AttributeError(f'Missing required parameter: {item}')
params: Dict[str, Any] = {}
for param in cls.required_params:
params[param] = action_dict[param]
return OutputValueStep(ctx, **params, driver=driver)
def __init__(
self,
ctx: Context,
target: Optional[str] = None,
parameters: Optional[dict] = None,
driver: Optional[WebDriver] = None,
):
super().__init__(ctx, UITestActions.OUTPUT_VALUE, target, parameters, driver=driver)
self._verify_target()
self._verify_parameters('source', 'outputName')
self.selector = {
OutputSources.LITERAL: self._output_literal,
OutputSources.XPATH_TEXT: self._output_xpath_text,
OutputSources.XPATH_PROPERTY: self._output_xpath_property,
}
def perform(self):
value_producer = self.selector[self.parameters['source']]
output_value = value_producer()
self._create_output(output_value)
def _create_output(self, value):
self.ctx.create_output(self.parameters['outputName'], value)
def _output_literal(self):
return self.target
def _output_xpath_text(self):
return self.element.text
def _output_xpath_property(self):
self._verify_parameters('propertyName')
property_name = self.parameters['propertyName']
return self.element.get_property(property_name)
def copy(self) -> "OutputValueStep":
'''
Creates a shallow copy of the OutputValueStep object
This is used so that each browser can have an independent copy of
the steps, in case a script wants to edit the steps of an
individual browser
'''
return OutputValueStep(
self.ctx,
self._target, # Make sure it's passed in raw
self._parameters, # Make sure it's passed in raw
self._driver
)
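# An illustrative call to the factory method above; the target string,
# source, and output name are hypothetical values, not part of this module:
#
#   step = OutputValueStep.from_dict(ctx, {
#       'target': 'some literal value',
#       'parameters': {
#           'source': OutputSources.LITERAL,
#           'outputName': 'myOutput',
#       },
#   })
#   step.perform()  # stores 'some literal value' under the name 'myOutput'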

209
src/quilla/steps/steps.py Normal file

@@ -0,0 +1,209 @@
'''
Module containing all the requisite classes to perform test steps.
Adding new actions
-------------------
Creating new simple actions in the code is designed to be fairly straightforward, and only
requires three steps (see the commented sketch after this docstring):
1. Add an entry for the action in the ``enums`` module
2. Create a function to perform the actual step under the ``TestStep`` class
3. Add an entry to the selector with the enum as a key and the function as a value
Keep in mind that the step function should also validate any required data, and that
updating the schema for proper JSON validation is essential.
If the parameters for the new action are expected to be enums, you must also add the logic
for converting the parameter from string to enum in the ``UIValidation`` class.
'''
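# A commented sketch of the three steps above, using a hypothetical
# ``DOUBLE_CLICK`` action; the enum entry, method, and selector key below
# are illustrative and not part of this module:
#
#   # 1. In the ``enums`` module:
#   DOUBLE_CLICK = 'DoubleClick'
#
#   # 2. Under the ``TestStep`` class:
#   def _double_click(self):
#       self._verify_target()
#       ActionChains(self.driver).double_click(self.element).perform()
#
#   # 3. In the ``TestStep.__init__`` selector:
#   UITestActions.DOUBLE_CLICK: self._double_click,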
from typing import (
Optional,
Dict,
Any,
)
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from quilla.ctx import Context
from quilla.common.enums import (
UITestActions,
)
from quilla.steps.base_steps import (
BaseStepFactory,
BaseStep
)
# Steps classes
class TestStep(BaseStep, BaseStepFactory):
'''
Class that contains the definition of a single test step.
Used for setting up validations
Args:
ctx: The runtime context of the application
action: The action enum for this step
target: What the target for this step is, if applicable
parameters: Extra options for certain actions
aggregator: The parent object holding this step
driver: The browser driver
Attributes:
selector: A dictionary that maps action enums to the action function
'''
required_params = [
'action',
]
optional_params = [
'target',
'parameters',
]
@classmethod
def from_dict(
cls,
ctx: Context,
action_dict,
driver: Optional[WebDriver] = None
) -> "TestStep":
'''
Factory method to extract needed parameters from a dictionary
'''
for item in cls.required_params:
if item not in action_dict:
raise AttributeError(f'Missing required parameter: {item}')
params: Dict[str, Any] = {}
for param in cls.required_params:
params[param] = action_dict[param]
for param in cls.optional_params:
if param in action_dict:
params[param] = action_dict[param]
return TestStep(ctx, **params, driver=driver)
def __init__(
self,
ctx: Context,
action: UITestActions,
target: Optional[str] = None,
parameters: Optional[dict] = None,
driver: Optional[WebDriver] = None,
):
super().__init__(ctx, action, target=target, parameters=parameters, driver=driver)
self.selector = {
UITestActions.CLICK: self._click,
UITestActions.CLEAR: self._clear,
UITestActions.SEND_KEYS: self._send_keys,
UITestActions.NAVIGATE_TO: self._navigate_to,
UITestActions.WAIT_FOR_VISIBILITY: self._wait_for_visibility,
UITestActions.WAIT_FOR_EXISTENCE: self._wait_for_existence,
UITestActions.NAVIGATE_BACK: self._navigate_back,
UITestActions.NAVIGATE_FORWARD: self._navigate_forward,
UITestActions.HOVER: self._hover,
UITestActions.REFRESH: self._refresh,
UITestActions.SET_BROWSER_SIZE: self._set_browser_size,
UITestActions.ADD_COOKIES: self._add_cookies,
UITestActions.SET_COOKIES: self._set_cookies,
UITestActions.CLEAR_COOKIES: self._clear_cookies,
UITestActions.REMOVE_COOKIE: self._remove_cookie,
}
def copy(self) -> "TestStep":
'''
Creates a shallow copy of the TestStep object
This is used so that each browser can have an independent copy of
the steps, in case a script wants to edit the steps of an
individual browser
'''
return TestStep(
self.ctx,
self.action,
self._target, # Make sure it's passed in raw
self._parameters, # Make sure it's passed in raw
self._driver
)
def perform(self):
'''
Runs the specified action. Wrapper for selecting proper inner method
'''
perform_action = self.selector[self.action]
return perform_action()
def _click(self):
self._verify_target()
self.element.click()
def _clear(self):
self._verify_target()
self.element.clear()
def _send_keys(self):
self._verify_parameters('data')
self.element.send_keys(self.parameters['data'])
def _navigate_to(self):
self._verify_target()
self.driver.get(self.target)
def _wait_for(self, condition):
self._verify_parameters('timeoutInSeconds')
WebDriverWait(self.driver, self.parameters['timeoutInSeconds']).until(condition)
def _wait_for_visibility(self):
self._verify_target()
self._wait_for(EC.visibility_of_element_located(self.locator))
def _wait_for_existence(self):
self._verify_target()
self._wait_for(EC.presence_of_element_located(self.locator))
def _navigate_back(self):
self.driver.back()
def _navigate_forward(self):
self.driver.forward()
def _refresh(self):
self.driver.refresh()
def _set_browser_size(self):
self._verify_parameters('width', 'height')
width = self._parameters['width']
height = self._parameters['height']
self.driver.set_window_size(width, height)
def _set_cookies(self):
self._clear_cookies()
self._add_cookies()
def _add_cookies(self):
self._verify_parameters('cookieJar')
self.driver.add_cookie(self.parameters['cookieJar'])
def _remove_cookie(self):
self._verify_parameters('cookieName')
self.driver.delete_cookie(self.parameters['cookieName'])
def _clear_cookies(self):
self.driver.delete_all_cookies()
def _hover(self):
self._verify_target()
ActionChains(self.driver).move_to_element(self.element).perform()
def _set_zoom_level(self):
self._verify_parameters('zoomLevel')
zoom_level = self._parameters['zoomLevel']
self.driver.execute_script(f'document.body.style.zoom="{zoom_level}%"')
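# An illustrative use of the factory above; the XPath target is a
# hypothetical value, and ``driver`` is assumed to be a bound WebDriver:
#
#   step = TestStep.from_dict(
#       ctx,
#       {'action': UITestActions.CLICK, 'target': "//button[@id='ok']"},
#       driver=driver,
#   )
#   step.perform()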

118
src/quilla/steps/steps_aggregator.py Normal file

@@ -0,0 +1,118 @@
from typing import (
List,
Optional,
Dict,
Type,
)
from selenium.webdriver.remote.webdriver import WebDriver
from quilla.ctx import Context
from quilla.common.utils import DriverHolder
from quilla.common.enums import (
UITestActions
)
from quilla.steps.base_steps import (
BaseStep,
BaseStepFactory,
)
from quilla.steps.steps import TestStep
from quilla.steps.validations import Validation
from quilla.steps.outputs import OutputValueStep
from quilla.reports.base_report import BaseReport
from quilla.reports.step_failure_report import StepFailureReport
class StepsAggregator(DriverHolder):
'''
Test step aggregator interface. Useful for abstracting operations
done on all the steps.
'''
def __init__(
self,
ctx: Context,
steps: Optional[List[Dict]] = None,
driver: Optional[WebDriver] = None
):
'''
Turns an array of dictionaries into appropriate step objects, and saves them in a list
'''
if steps is None:  # avoid sharing a mutable default list between instances
steps = []
self._steps: List[BaseStep] = []
self._driver = driver
self.ctx = ctx
step_factory_selector: Dict[UITestActions, Type[BaseStepFactory]] = {
UITestActions.VALIDATE: Validation,
UITestActions.OUTPUT_VALUE: OutputValueStep,
}
ctx.pm.hook.quilla_step_factory_selector(selector=step_factory_selector) # Allow plugins to add selectors
for step in steps:
step_factory = step_factory_selector.get(step['action'], TestStep)
self._steps.append(step_factory.from_dict(ctx, step, driver=driver))
@property
def driver(self) -> WebDriver:
'''
The webdriver attached to this object. Setting this property will also set
the driver of all steps in the aggregator.
Raises:
NoDriverException: if the internal driver is currently None
'''
return super().driver
@driver.setter
def driver(self, new_driver: Optional[WebDriver]):
for step in self._steps:
step.driver = new_driver
self._driver = new_driver
def run_steps(self) -> List[BaseReport]:
'''
Performs all bound steps, collecting generated reports and errors
Returns:
A list of reports generated
'''
reports: List[BaseReport] = []
for i, step in enumerate(self._steps):
try:
report = step.perform()
except Exception as e:
if not self.ctx.suppress_exceptions:
# If debugging, don't produce reports since a stack trace will be better
raise e
report = StepFailureReport(e, self.driver.name, step.action, i)
reports.append(report)
# Exit early, since a step that raises an exception can prevent later steps from working
return reports
if report is not None:
reports.append(report)
return reports
def copy(self) -> "StepsAggregator":
'''
Creates a copy of the StepsAggregator object
This is used so that each browser can have an independent copy of
the steps, in case a script wants to edit the steps of an
individual browser
'''
steps = []
for step in self._steps:
steps.append(step.copy())
duplicate = StepsAggregator(self.ctx)
duplicate._steps = steps
duplicate._driver = self._driver
return duplicate
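# A minimal usage sketch, assuming step dictionaries whose values have
# already been resolved to enums (as UIValidation.from_dict does) and a
# bound WebDriver named ``driver``:
#
#   aggregator = StepsAggregator(ctx, steps=[{'action': UITestActions.REFRESH}])
#   aggregator.driver = driver
#   reports = aggregator.run_steps()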

333
src/quilla/steps/validations.py Normal file

@@ -0,0 +1,333 @@
from typing import (
cast,
Optional,
List,
Dict,
Union,
Callable,
Type
)
import re
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.by import By
from quilla.ctx import Context
from quilla.common.enums import (
ValidationTypes,
ValidationStates,
XPathValidationStates,
URLValidationStates,
)
from quilla.reports.validation_report import ValidationReport
from quilla.steps.base_steps import BaseStepFactory, BaseValidation
ValidationDictionary = Dict[str, Union[str, ValidationStates, ValidationTypes]]
class XPathValidation(BaseValidation):
'''
Class defining the behaviour for performing XPath validations
Args:
ctx: The runtime context for the application
target: The XPath of the element to perform the validation against
state: The desired state of the target web element
driver: An optional argument to allow the driver to be bound at object creation.
'''
def __init__(
self,
ctx: Context,
target: str,
state: XPathValidationStates,
parameters: Optional[Dict],
driver: Optional[WebDriver] = None,
) -> None:
selector: Dict[ValidationStates, Callable[[], ValidationReport]] = {
XPathValidationStates.EXISTS: self._check_exists,
XPathValidationStates.NOT_EXISTS: self._check_not_exists,
XPathValidationStates.VISIBLE: self._check_visible,
XPathValidationStates.NOT_VISIBLE: self._check_not_visible,
XPathValidationStates.TEXT_MATCHES: self._check_text_matches,
XPathValidationStates.NOT_TEXT_MATCHES: self._check_not_text_matches,
XPathValidationStates.HAS_PROPERTY: self._check_has_property,
XPathValidationStates.NOT_HAS_PROPERTY: self._check_not_has_property,
XPathValidationStates.PROPERTY_HAS_VALUE: self._check_property_has_value,
XPathValidationStates.NOT_PROPERTY_HAS_VALUE: self._check_not_property_has_value,
XPathValidationStates.HAS_ATTRIBUTE: self._check_has_attribute,
XPathValidationStates.NOT_HAS_ATTRIBUTE: self._check_not_has_attribute,
XPathValidationStates.ATTRIBUTE_HAS_VALUE: self._check_attribute_has_value,
XPathValidationStates.NOT_ATTRIBUTE_HAS_VALUE: self._check_not_attribute_has_value,
}
super().__init__(
ctx,
ValidationTypes.XPATH,
target,
state,
selector,
parameters=parameters,
driver=driver
)
def _find_all(self) -> List[WebElement]:
'''
Proxy method to find all elements specified by the _target attribute
Returns:
A list of all the elements found for that specific target, searched by XPath
Raises:
NoDriverException: If the driver is not currently bound to this step
'''
return self.driver.find_elements(By.XPATH, self.target)
def _element_text_matches_pattern(self) -> bool:
self._verify_parameters('pattern')
element_text = self.element.text
pattern = self.parameters['pattern']
return re.search(pattern, element_text) is not None
def _element_exists(self) -> bool:
return len(self._find_all()) > 0
def _element_visible(self) -> bool:
return self.element.is_displayed()
def _element_has_property(self) -> bool:
self._verify_parameters('name')
return self.element.get_property(self.parameters['name']) is not None
def _element_has_attribute(self) -> bool:
self._verify_parameters('name')
return self.element.get_attribute(self.parameters['name']) is not None
def _element_check_value(self, value_fetch_fn: Callable[[str], Optional[str]]) -> bool:
self._verify_parameters('name', 'value')
element_value = value_fetch_fn(self.parameters['name'])
return element_value == self.parameters['value']
def _element_property_has_value(self) -> bool:
return self._element_check_value(self.element.get_property)
def _element_attribute_has_value(self) -> bool:
return self._element_check_value(self.element.get_attribute)
def _check_exists(self) -> ValidationReport:
return self._create_report(
self._element_exists()
)
def _check_not_exists(self) -> ValidationReport:
return self._create_report(
not self._element_exists()
)
def _check_visible(self) -> ValidationReport:
return self._create_report(
self._element_visible()
)
def _check_not_visible(self) -> ValidationReport:
return self._create_report(
not self._element_visible()
)
def _check_text_matches(self) -> ValidationReport:
text_matches = self._element_text_matches_pattern()
msg = ''
if not text_matches:
msg = (
f'Element text "{self.element.text}" '
f'does not match pattern "{self._parameters["pattern"]}"'
)
return self._create_report(
text_matches,
msg
)
def _check_not_text_matches(self) -> ValidationReport:
text_matches = self._element_text_matches_pattern()
msg = ''
if text_matches:
msg = (
f'Element text "{self.element.text}" '
f'matches pattern "{self._parameters["pattern"]}"'
)
return self._create_report(
not text_matches,
msg
)
def _check_has_property(self) -> ValidationReport:
return self._create_report(
self._element_has_property()
)
def _check_not_has_property(self) -> ValidationReport:
return self._create_report(
not self._element_has_property()
)
def _check_has_attribute(self) -> ValidationReport:
return self._create_report(
self._element_has_attribute()
)
def _check_not_has_attribute(self) -> ValidationReport:
return self._create_report(
not self._element_has_attribute()
)
def _check_property_has_value(self) -> ValidationReport:
return self._create_report(
self._element_property_has_value()
)
def _check_not_property_has_value(self) -> ValidationReport:
return self._create_report(
not self._element_property_has_value()
)
def _check_attribute_has_value(self) -> ValidationReport:
return self._create_report(
self._element_attribute_has_value()
)
def _check_not_attribute_has_value(self) -> ValidationReport:
return self._create_report(
not self._element_attribute_has_value()
)
class URLValidation(BaseValidation):
'''
Class defining the behaviour for performing URL validations
Args:
ctx: The runtime context for the application
target: The URL to perform the validation with
state: The desired state of the target url
driver: An optional argument to allow the driver to be bound at object creation.
'''
def __init__(
self,
ctx: Context,
target: str,
state: URLValidationStates,
parameters: Optional[Dict],
driver: Optional[WebDriver] = None,
) -> None:
selector: Dict[ValidationStates, Callable[[], ValidationReport]] = {
URLValidationStates.CONTAINS: self._check_contains,
URLValidationStates.NOT_CONTAINS: self._check_not_contains,
URLValidationStates.EQUALS: self._check_equals,
URLValidationStates.NOT_EQUALS: self._check_not_equals
}
super().__init__(
ctx,
ValidationTypes.URL,
target,
state,
selector=selector,
parameters=parameters,
driver=driver
)
def perform(self) -> ValidationReport:
'''
Performs the correct action based on what is defined within the selector, and returns
the resulting report produced.
Returns:
A report summarizing the results of the executed validation
'''
report = super().perform()
if not report.success:
report.msg = f'Expected URL: "{self._target}", received URL: "{self.url}"'
return report
@property
def url(self) -> str:
'''
The current URL of the browser
Raises:
NoDriverException: If the driver is not currently bound to this step
'''
return self.driver.current_url
def _check_contains(self) -> ValidationReport:
return self._create_report(
self.url.find(self.target) > -1
)
def _check_not_contains(self) -> ValidationReport:
return self._create_report(
self.url.find(self.target) == -1
)
def _check_equals(self) -> ValidationReport:
return self._create_report(
self.url == self.target
)
def _check_not_equals(self) -> ValidationReport:
return self._create_report(
self.url != self.target
)
class Validation(BaseStepFactory):
'''
Factory class for the different validations
'''
validation_selector: Dict[ValidationTypes, Type[BaseValidation]] = {
ValidationTypes.XPATH: XPathValidation,
ValidationTypes.URL: URLValidation,
}
@classmethod
def from_dict(
cls,
ctx: Context,
validation_dict: ValidationDictionary,
driver: Optional[WebDriver] = None
) -> BaseValidation:
'''
From a validation dict, produces the appropriate validation object
Args:
ctx: The runtime context for the application
validation_dict: A dictionary containing the definition of a validation, including
the target, state, and type of validation to be performed
driver: The driver that will be connected to the validation, if any
Returns:
Validation object of the type requested in the validation dictionary
'''
validation_params = {
'driver': driver,
'target': validation_dict['target'],
'state': validation_dict['state'],
'parameters': validation_dict.get('parameters', None),
}
validation_type = cast(ValidationTypes, validation_dict['type'])
validation = cls.validation_selector[validation_type]
return validation(ctx=ctx, **validation_params) # type: ignore
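# An illustrative validation dictionary for this factory; the target URL is
# a hypothetical value, and ``driver`` is assumed to be a bound WebDriver:
#
#   validation = Validation.from_dict(
#       ctx,
#       {
#           'type': ValidationTypes.URL,
#           'state': URLValidationStates.CONTAINS,
#           'target': 'example.com',
#       },
#       driver=driver,
#   )
#   report = validation.perform()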

155
src/quilla/ui_validation.py Normal file

@@ -0,0 +1,155 @@
from typing import (
List,
Type,
Dict
)
import json
from quilla.ctx import Context
from quilla.common.enums import (
BrowserTargets,
UITestActions,
URLValidationStates,
ValidationStates,
ValidationTypes,
XPathValidationStates,
OutputSources
)
from quilla.steps.steps_aggregator import StepsAggregator
from quilla.browser.browser_validations import BrowserValidations
from quilla.reports.base_report import BaseReport
from quilla.reports.report_summary import ReportSummary
from quilla.common.utils import EnumResolver
# All UI Validations
class UIValidation(EnumResolver):
'''
A class to convert data into a valid UIValidation instance, which is able to resolve
raw text data into the appropriate enums to be used by the internal classes.
Creates shallow copies of all the steps to ensure independence
Args:
ctx: The runtime context for the application
browsers: A list of browsers to run the validations against
root: The starting path of the validations
setup_steps: A list of steps used to create the validation report
Attributes:
browsers: A list of instantiated browser validations, each containing an
independent steps aggregator
'''
validation_type_selector: Dict[ValidationTypes, Type[ValidationStates]] = {
ValidationTypes.XPATH: XPathValidationStates,
ValidationTypes.URL: URLValidationStates,
}
@classmethod
def from_json(cls, ctx: Context, validation_json: str) -> "UIValidation": # pragma: no cover
'''
Converts a json string into a UIValidation object
'''
return UIValidation.from_dict(ctx, json.loads(validation_json))
@classmethod
def from_file(cls, ctx: Context, fp) -> "UIValidation": # pragma: no cover
'''
Converts an fp (a .read()-supporting file-like object) containing a JSON
document into a UIValidation object
'''
return UIValidation.from_dict(ctx, json.load(fp))
@classmethod
def from_filename(cls, ctx: Context, path: str) -> "UIValidation": # pragma: no cover
'''
Reads a file at the specified path and attempts to convert it into a
UIValidation object
'''
with open(path) as fp:
return UIValidation.from_file(ctx, fp)
@classmethod
def from_dict(cls, ctx: Context, validation_parameters: dict) -> "UIValidation":
'''
Converts a dictionary that represents a single UIValidation test case into
the appropriate validation object.
Note:
The browsers effectively form a Cartesian product with the steps & validations
'''
root_path: str = validation_parameters['path']
definitions = validation_parameters.get('definitions', {})
ctx.load_definitions(definitions)
browsers: List[BrowserTargets] = []
for browser_name in validation_parameters['targetBrowsers']:
browsers.append(cls._name_to_enum(browser_name, BrowserTargets, ctx=ctx))
steps = validation_parameters['steps']
for step in steps:
action = step['action']
action = cls._name_to_enum(action, UITestActions)
step['action'] = action
if action == UITestActions.VALIDATE:
validation_type = step['type']
validation_state = step['state']
validation_type = cls._name_to_enum(validation_type, ValidationTypes, ctx=ctx)
state_enum = cls.validation_type_selector[validation_type]
validation_state = cls._name_to_enum(validation_state, state_enum, ctx=ctx)
step['type'] = validation_type
step['state'] = validation_state
if 'parameters' in step:
params = step['parameters']
if 'source' in params:
source = params['source']
params['source'] = cls._name_to_enum(source, OutputSources, ctx=ctx)
step['parameters'] = params
return UIValidation(
ctx,
browsers,
root_path,
steps,
)
def __init__(
self,
ctx: Context,
browsers: List[BrowserTargets],
root: str,
setup_steps: list,
):
self._steps = steps = StepsAggregator(ctx, setup_steps)
self.browsers: List[BrowserValidations] = []
for browser_target in browsers:
self.browsers.append(
BrowserValidations(
ctx,
browser_target,
root,
steps.copy(),
)
)
def validate_all(self) -> ReportSummary:
'''
Performs all the setup test steps required for each test case
and executes the validations, producing a set of validation
reports.
'''
validation_reports: List[BaseReport] = []
for browser in self.browsers:
validation_reports.extend(browser.validate())
return ReportSummary(validation_reports)
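# A hedged end-to-end sketch, assuming a runtime context ``ctx`` and a
# hypothetical validation file 'validation.json' on disk:
#
#   validation = UIValidation.from_filename(ctx, 'validation.json')
#   summary = validation.validate_all()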

19
test-requirements.txt Normal file

@@ -0,0 +1,19 @@
attrs==21.2.0
coverage==5.5
flake8==3.9.2
iniconfig==1.1.1
mccabe==0.6.1
mypy==0.910
mypy-extensions==0.4.3
packaging==20.9
pluggy==0.13.1
py==1.10.0
pycodestyle==2.7.0
pyflakes==2.3.1
pyparsing==2.4.7
pytest==6.2.4
pytest-cov==2.12.1
pytest-sugar==0.9.4
termcolor==1.1.0
toml==0.10.2
typing-extensions==3.10.0.0


@@ -0,0 +1,22 @@
import pytest
from quilla.ctx import Context
from quilla.browser.drivers import (
FirefoxBrowser
)
@pytest.mark.browser
@pytest.mark.firefox
@pytest.mark.slow
class FirefoxBrowserTests:
def test_runs_headless(self, ctx: Context):
'''
Only test that the default behaviour is to run headless, since the testing environment
might not support a display
'''
browser = FirefoxBrowser(ctx)
assert browser.capabilities['moz:headless'] is True
browser.quit()

53
tests/conftest.py Normal file

@@ -0,0 +1,53 @@
from unittest.mock import Mock
from typing import List
import pytest
from _pytest.config import PytestPluginManager
from _pytest.nodes import Item
from _pytest.config import Config
from selenium.webdriver.remote.webdriver import WebDriver
from quilla import get_plugin_manager
from quilla.ctx import Context
from pytest_quilla.pytest_classes import (
collect_file
)
@pytest.fixture()
def ctx(driver: WebDriver, plugin_manager):
'''
Ensures every test that requires a context gets its own isolated context
'''
mock_ctx = Context(plugin_manager)
driver.name = mock_ctx._output_browser
mock_ctx.driver = driver
return mock_ctx
@pytest.fixture()
def plugin_manager(pytestconfig: Config):
pm = get_plugin_manager(pytestconfig.rootpath)
return pm
@pytest.fixture()
def driver():
mock_driver = Mock(spec=WebDriver)
return mock_driver
def pytest_addoption(parser, pluginmanager: PytestPluginManager):
pluginmanager.set_blocked('quilla')
parser.addoption(
"--quilla-opts",
action="store",
default="",
help="Options to be passed through to the quilla runtime for the scenario tests"
)
def pytest_collect_file(parent, path):
return collect_file(parent, path, 'test')


@@ -0,0 +1,22 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButtonDiv": "//div[@class='position-relative mr-3']"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "AttributeHasValue",
"target": "${{ Definitions.WelcomePage.SignInButtonDiv }}",
"parameters": {
"name": "class",
"value": "position-relative mr-3"
}
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://bing.com",
"steps": [
{
"action": "OutputValue",
"target": "bing",
"parameters": {
"source": "Literal",
"outputName": "url_contains"
}
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "${{ Validation.url_contains }}"
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButtonDiv": "//div[@class='position-relative mr-3']"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "HasAttribute",
"target": "${{ Definitions.WelcomePage.SignInButtonDiv }}",
"parameters": {
"name": "class"
}
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButtonDiv": "//div[@class='position-relative mr-3']"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "HasProperty",
"target": "${{ Definitions.WelcomePage.SignInButtonDiv }}",
"parameters": {
"name": "className"
}
}
]
}


@@ -0,0 +1,50 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://techstepacademy.com/trial-of-the-stones",
"steps": [
{
"action": "Validate",
"type": "URL",
"state": "Equals",
"target": "https://techstepacademy.com/trial-of-the-stones"
},
{
"action": "NavigateTo",
"target": "https://bing.com"
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "bing.com"
},
{
"action": "NavigateBack"
},
{
"action": "Validate",
"type": "URL",
"state": "Equals",
"target": "https://techstepacademy.com/trial-of-the-stones"
},
{
"action": "NavigateForward"
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "bing.com"
},
{
"action": "Refresh"
},
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "bing.com"
}
]
}


@@ -0,0 +1,13 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://bing.com",
"steps": [
{
"action": "Validate",
"type": "URL",
"state": "Contains",
"target": "bing"
}
]
}


@@ -0,0 +1,22 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButtonDiv": "//div[@class='position-relative mr-3']"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "PropertyHasValue",
"target": "${{ Definitions.WelcomePage.SignInButtonDiv }}",
"parameters": {
"name": "className",
"value": "position-relative mr-3"
}
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://bing.com",
"steps": [
{
"action": "OutputValue",
"target": "${{ Driver.title }}",
"parameters": {
"source": "Literal",
"outputName": "site_title"
}
},
{
"action": "Validate",
"target": "${{ Validation.site_title }}",
"type": "URL",
"state": "Contains"
}
]
}


@@ -0,0 +1,19 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://techstepacademy.com/trial-of-the-stones",
"steps": [
{
"action": "SendKeys",
"target": "//input[@id='r1Input']",
"parameters": {
"data": "rock"
}
},
{
"action": "Click",
"target": "//button[@id='r1Btn']"
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButton": "//div[@class='position-relative mr-3']/a"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "TextMatches",
"target": "${{ Definitions.WelcomePage.SignInButton }}",
"parameters": {
"pattern": "Sign in"
}
}
]
}


@@ -0,0 +1,21 @@
{
"markers": ["integration", "slow"],
"definitions": {
"WelcomePage": {
"SignInButton": "//div[@class='position-relative mr-3']/a"
}
},
"targetBrowsers": ["Firefox"],
"path": "https://github.com",
"steps": [
{
"action": "Validate",
"type": "XPath",
"state": "TextMatches",
"target": "${{ Definitions.WelcomePage.SignInButton }}",
"parameters": {
"pattern": "[Ss]ign[ -]?[Ii]n"
}
}
]
}


@@ -0,0 +1,68 @@
{
"definitions": {
"RiddleInput": "//input[@id='r1Input']",
"PasswordInput": "//input[@id='r2Input']",
"MerchantInput": "//input[@id='r3Input']",
"RiddleSubmitButton": "//button[@id='r1Btn']",
"PasswordSubmitButton": "//button[@id='r2Butn']",
"MerchantSubmitButton": "//button[@id='r3Butn']",
"TrialSubmitButton": "//button[@id='checkButn']",
"PasswordBanner": "//div[@id='passwordBanner']",
"TrialCompleteBanner": "//div[@id='trialCompleteBanner']"
},
"targetBrowsers": ["Firefox"],
"path": "https://techstepacademy.com/trial-of-the-stones",
"steps": [
{
"action": "SendKeys",
"target": "${{ Definitions.RiddleInput }}",
"parameters": {
"data": "rock"
}
},
{
"action": "Click",
"target": "${{ Definitions.RiddleSubmitButton }}"
},
{
"action": "OutputValue",
"target": "${{ Definitions.PasswordBanner }}",
"parameters": {
"source": "XPathText",
"outputName": "trialPassword"
}
},
{
"action": "SendKeys",
"target": "${{ Definitions.PasswordInput }}",
"parameters": {
"data": "${{ Validation.trialPassword }}"
}
},
{
"action": "Click",
"target": "${{ Definitions.PasswordSubmitButton }}"
},
{
"action": "SendKeys",
"target": "${{ Definitions.MerchantInput }}",
"parameters": {
"data": "Jessica"
}
},
{
"action": "Click",
"target": "${{ Definitions.MerchantSubmitButton }}"
},
{
"action": "Click",
"target": "${{ Definitions.TrialSubmitButton }}"
},
{
"action": "Validate",
"type": "XPath",
"state": "Visible",
"target": "${{ Definitions.TrialCompleteBanner }}"
}
]
}


@@ -0,0 +1,24 @@
{
"markers": ["integration", "quilla", "slow"],
"targetBrowsers": ["Firefox"],
"path": "https://techstepacademy.com/trial-of-the-stones",
"steps": [
{
"action": "SendKeys",
"target": "//input[@id='r1Input']",
"parameters": {
"data": "rock"
}
},
{
"action": "Click",
"target": "//button[@id='r1Btn']"
},
{
"action": "Validate",
"type": "XPath",
"target": "//div[@id='passwordBanner']",
"state": "Exists"
}
]
}

137
tests/test_ctx.py Normal file

@@ -0,0 +1,137 @@
import os
import pytest
from pluggy import PluginManager
from quilla.ctx import (
get_default_context,
Context
)
from quilla.common.exceptions import (
InvalidContextExpressionException,
InvalidOutputName,
)
@pytest.mark.smoke
@pytest.mark.ctx
class ContextTests:
@pytest.mark.unit
def test_default_context_singleton(self, plugin_manager: PluginManager):
'''
Ensures that the `get_default_context` returns the same object every time
'''
ctx = get_default_context(plugin_manager)
other_ctx = get_default_context(plugin_manager)
assert ctx is other_ctx
@pytest.mark.unit
@pytest.mark.parametrize('debug_opt', [True, False])
def test_context_sets_debug_options(self, ctx: Context, debug_opt: bool):
'''
Ensures that setting the is_debug parameter properly sets all debug options
'''
ctx.is_debug = debug_opt
assert ctx.suppress_exceptions is not debug_opt
assert ctx.run_headless is not debug_opt
assert ctx.close_browser is not debug_opt
assert ctx._debug is debug_opt
assert ctx.is_debug is debug_opt
@pytest.mark.unit
def test_context_sets_path_var(self):
'''
Ensures that setting the drivers_path property updates the system
PATH variable
'''
ctx = Context(
None,
drivers_path='/some/path'
)
assert ctx.drivers_path == '/some/path'
assert os.environ['PATH'].find('/some/path') > -1
@pytest.mark.unit
@pytest.mark.parametrize('expression', [
'some_text',
'some_other_text',
'some_text_with_(parenthesis)',
'$ some_text_with_$',
'${ some_text_with_more_characters',
'${ incorrect expression }',
])
def test_context_replacement_returns_same_on_no_ctx_expression(
self,
ctx: Context,
expression: str
):
'''
Ensures that strings with no context expressions are not altered
by the perform_replacements function
'''
assert ctx.perform_replacements(expression) == expression
@pytest.mark.parametrize('output_name', [
'my_output',
'my.output',
'my.deeply.nested.output',
'my.deeply.nested_output'
])
@pytest.mark.parametrize('output_value', [
'some_text',
'some_other_text',
'this_is_just_some_value_I_guess',
'some_other_test_value',
])
@pytest.mark.parametrize('context_obj,setup_func', [
('Validation', lambda ctx, x, y: ctx.create_output(x, y)),
('Environment', lambda _, x, y: os.environ.update({x: y})),
])
def test_context_uses_output_expression(
self,
ctx: Context,
output_name: str,
output_value: str,
context_obj: str,
setup_func,
):
'''
Ensures that the perform_replacements function can adequately
retrieve outputted & environment values
'''
setup_func(ctx, output_name, output_value)
context_expression = '${{ %s.%s }}' % (context_obj, output_name)
assert ctx.perform_replacements(context_expression) == output_value
def test_context_can_create_nonstr_output(self, ctx: Context):
'''
Ensures that it is possible to create non-string outputs, but they are returned as
strings
'''
ctx.create_output('some_value', 3)
assert ctx.perform_replacements('${{ Validation.some_value }}') == '3'
def test_context_errors_on_invalid_expression(self, ctx: Context):
'''
Ensures that attempting to reference values that don't exist will cause an error
'''
with pytest.raises(InvalidContextExpressionException):
ctx.perform_replacements('${{ Validation.some.nonexistent_value }}')
@pytest.mark.parametrize('nested_output_name', [
'nested_value',
'deeply.nested.value'
])
def test_context_expression_errors_on_used_name(
self,
ctx: Context,
nested_output_name: str
):
ctx.create_output('some_output', 'some_output')
with pytest.raises(InvalidOutputName):
ctx.create_output('some_output.%s' % nested_output_name, 'some_output')

64
tests/test_utils.py Normal file

@@ -0,0 +1,64 @@
from enum import Enum
from typing import Type
import pytest
from quilla.common.utils import (
DriverHolder,
EnumResolver
)
from quilla.common.exceptions import (
NoDriverException,
EnumValueNotFoundException
)
from quilla.common import enums
@pytest.mark.smoke
@pytest.mark.util
class UtilTests:
def test_driverholder_exception(self):
'''
Tests that any driver holder raises an exception when the driver
is accessed before one is set.
'''
holder = DriverHolder()
assert holder._driver is None
with pytest.raises(NoDriverException):
holder.driver  # property access alone should raise
def test_driverholder_returns_expected(self):
holder = DriverHolder()
holder.driver = "Some Value"
assert holder.driver == "Some Value"
@pytest.mark.parametrize("enum_type", [
enums.UITestActions,
enums.XPathValidationStates,
enums.URLValidationStates,
enums.ReportType,
enums.BrowserTargets,
enums.OutputSources
])
def test_enumresolver_can_resolve_expected(self, enum_type: Type[Enum]):
resolver = EnumResolver()
for val in enum_type:
assert resolver._name_to_enum(val.value, enum_type) is val
@pytest.mark.parametrize("enum_type", [
enums.UITestActions,
enums.XPathValidationStates,
enums.URLValidationStates,
enums.ReportType,
enums.BrowserTargets,
enums.OutputSources
])
def test_enumresolver_raises_exception_if_no_result(self, enum_type: Type[Enum]):
resolver = EnumResolver()
with pytest.raises(EnumValueNotFoundException):
resolver._name_to_enum('', enum_type)