Backed out 4 changesets (bug 1635260) for bustages on metrics.yaml. CLOSED TREE

Backed out changeset 40fca1886746 (bug 1635260)
Backed out changeset 71095f700b94 (bug 1635260)
Backed out changeset 320c91b98262 (bug 1635260)
Backed out changeset 62dd07b3dcda (bug 1635260)
Narcis Beleuzu 2020-07-28 00:48:21 +03:00
Parent 4fd5e1a2d3
Commit 4520632fe0
439 changed files with 1122 additions and 39632 deletions

33 third_party/python/Jinja2/CHANGES.rst vendored

@ -1,38 +1,5 @@
.. currentmodule:: jinja2
Version 2.11.2
--------------
Released 2020-04-13
- Fix a bug that caused callable objects with ``__getattr__``, like
:class:`~unittest.mock.Mock` to be treated as a
:func:`contextfunction`. :issue:`1145`
- Update ``wordcount`` filter to trigger :class:`Undefined` methods
by wrapping the input in :func:`soft_unicode`. :pr:`1160`
- Fix a hang when displaying tracebacks on Python 32-bit.
:issue:`1162`
- Showing an undefined error for an object that raises
``AttributeError`` on access doesn't cause a recursion error.
:issue:`1177`
- Revert changes to :class:`~loaders.PackageLoader` from 2.10 which
removed the dependency on setuptools and pkg_resources, and added
limited support for namespace packages. The changes caused issues
when using Pytest. Due to the difficulty in supporting Python 2 and
:pep:`451` simultaneously, the changes are reverted until 3.0.
:pr:`1182`
- Fix line numbers in error messages when newlines are stripped.
:pr:`1178`
- The special ``namespace()`` assignment object in templates works in
async environments. :issue:`1180`
- Fix whitespace being removed before tags in the middle of lines when
``lstrip_blocks`` is enabled. :issue:`1138`
- :class:`~nativetypes.NativeEnvironment` doesn't evaluate
intermediate strings during rendering. This prevents early
evaluation which could change the value of an expression.
:issue:`1186`
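The ``lstrip_blocks`` entry (issue 1138) is the easiest of these to see in isolation. A minimal sketch, assuming nothing beyond a stock Jinja2 install; the commented outputs reflect the two versions this backout toggles between:

from jinja2 import Environment

env = Environment(lstrip_blocks=True)
tmpl = env.from_string(
    "{% if True %}a{% endif %} {% if True %}b{% endif %}"
)
# 2.11.2 keeps the mid-line space ('a b'); the 2.11.1 lexer that this
# backout restores strips it ('ab') -- the bug issue 1138 describes.
print(repr(tmpl.render()))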
Version 2.11.1
--------------

2 third_party/python/Jinja2/PKG-INFO vendored

@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: Jinja2
Version: 2.11.2
Version: 2.11.1
Summary: A very fast and expressive template engine.
Home-page: https://palletsprojects.com/p/jinja/
Author: Armin Ronacher


@ -1,18 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
env = Environment(
line_statement_prefix="#", variable_start_string="${", variable_end_string="}"
)
print(
env.from_string(
"""\
<ul>
# for item in range(10)
<li class="${loop.cycle('odd', 'even')}">${item}</li>
# endfor
</ul>\
"""
).render()
)


@ -1,8 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
env = Environment(loader=FileSystemLoader("templates"))
tmpl = env.get_template("broken.html")
print(tmpl.render(seq=[3, 2, 4, 5, 3, 2, 0, 2, 1]))


@ -1,15 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
from jinja2.loaders import DictLoader
env = Environment(
loader=DictLoader(
{
"a": "[A[{% block body %}{% endblock %}]]",
"b": "{% extends 'a' %}{% block body %}[B]{% endblock %}",
"c": "{% extends 'b' %}{% block body %}###{{ super() }}###{% endblock %}",
}
)
)
print(env.get_template("c").render())


@ -1,6 +0,0 @@
{% from 'subbroken.html' import may_break %}
<ul>
{% for item in seq %}
<li>{{ may_break(item) }}</li>
{% endfor %}
</ul>


@ -1,3 +0,0 @@
{% macro may_break(item) -%}
[{{ item / 0 }}]
{%- endmacro %}


@ -1,31 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
from jinja2.loaders import DictLoader
env = Environment(
loader=DictLoader(
{
"child.html": u"""\
{% extends master_layout or 'master.html' %}
{% include helpers = 'helpers.html' %}
{% macro get_the_answer() %}42{% endmacro %}
{% title = 'Hello World' %}
{% block body %}
{{ get_the_answer() }}
{{ helpers.conspirate() }}
{% endblock %}
""",
"master.html": u"""\
<!doctype html>
<title>{{ title }}</title>
{% block body %}{% endblock %}
""",
"helpers.html": u"""\
{% macro conspirate() %}23{% endmacro %}
""",
}
)
)
tmpl = env.get_template("child.html")
print(tmpl.render())


@ -1,29 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
env = Environment(
line_statement_prefix="%", variable_start_string="${", variable_end_string="}"
)
tmpl = env.from_string(
"""\
% macro foo()
${caller(42)}
% endmacro
<ul>
% for item in seq
<li>${item}</li>
% endfor
</ul>
% call(var) foo()
[${var}]
% endcall
% filter escape
<hello world>
% for item in [1, 2, 3]
- ${item}
% endfor
% endfilter
"""
)
print(tmpl.render(seq=range(10)))


@ -1,15 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
tmpl = Environment().from_string(
"""\
<ul>
{%- for item in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] if item % 2 == 0 %}
<li>{{ loop.index }} / {{ loop.length }}: {{ item }}</li>
{%- endfor %}
</ul>
if condition: {{ 1 if foo else 0 }}
"""
)
print(tmpl.render(foo=True))


@ -1,20 +0,0 @@
from __future__ import print_function
from jinja2 import Environment
env = Environment(extensions=["jinja2.ext.i18n"])
env.globals["gettext"] = {"Hello %(user)s!": "Hallo %(user)s!"}.__getitem__
env.globals["ngettext"] = lambda s, p, n: {
"%(count)s user": "%(count)d Benutzer",
"%(count)s users": "%(count)d Benutzer",
}[n == 1 and s or p]
print(
env.from_string(
"""\
{% trans %}Hello {{ user }}!{% endtrans %}
{% trans count=users|count -%}
{{ count }} user{% pluralize %}{{ count }} users
{% endtrans %}
"""
).render(user="someone", users=[1, 2, 3])
)


@ -41,4 +41,4 @@ from .utils import evalcontextfunction
from .utils import is_undefined
from .utils import select_autoescape
__version__ = "2.11.2"
__version__ = "2.11.1"


@ -26,16 +26,17 @@ async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
if getattr(normal_filter, "environmentfilter", False) is True:
if getattr(normal_filter, "environmentfilter", False):
def is_async(args):
return args[0].is_async
wrap_evalctx = False
else:
has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
if not getattr(normal_filter, "evalcontextfilter", False) and not getattr(
normal_filter, "contextfilter", False
):
wrap_evalctx = True
def is_async(args):
return args[0].environment.is_async
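Both branches key off marker attributes that Jinja2's public decorators set on the filter function. A minimal sketch of how a filter acquires the ``environmentfilter`` flag these ``getattr`` checks look for (2.11-era API assumed):

from jinja2 import Environment, environmentfilter

@environmentfilter
def shout(environment, value):
    # the decorator only sets shout.environmentfilter = True, which is
    # exactly the attribute the getattr() checks above test for
    return value.upper() + "!"

env = Environment()
env.filters["shout"] = shout
print(env.from_string("{{ 'hi' | shout }}").render())  # HI!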


@ -1307,13 +1307,13 @@ class CodeGenerator(NodeVisitor):
def finalize(value):
return default(env_finalize(value))
if getattr(env_finalize, "contextfunction", False) is True:
if getattr(env_finalize, "contextfunction", False):
src += "context, "
finalize = None # noqa: F811
elif getattr(env_finalize, "evalcontextfunction", False) is True:
elif getattr(env_finalize, "evalcontextfunction", False):
src += "context.eval_ctx, "
finalize = None
elif getattr(env_finalize, "environmentfunction", False) is True:
elif getattr(env_finalize, "environmentfunction", False):
src += "environment, "
def finalize(value):
@ -1689,11 +1689,11 @@ class CodeGenerator(NodeVisitor):
func = self.environment.filters.get(node.name)
if func is None:
self.fail("no filter named %r" % node.name, node.lineno)
if getattr(func, "contextfilter", False) is True:
if getattr(func, "contextfilter", False):
self.write("context, ")
elif getattr(func, "evalcontextfilter", False) is True:
elif getattr(func, "evalcontextfilter", False):
self.write("context.eval_ctx, ")
elif getattr(func, "environmentfilter", False) is True:
elif getattr(func, "environmentfilter", False):
self.write("environment, ")
# if the filter node is None we are inside a filter block


@ -245,7 +245,10 @@ else:
class _CTraceback(ctypes.Structure):
_fields_ = [
# Extra PyObject slots when compiled with Py_TRACE_REFS.
("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
(
"PyObject_HEAD",
ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16),
),
# Only care about tb_next as an object, not a traceback.
("tb_next", ctypes.py_object),
]
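The removed expression sizes the object header portably, while the restored one hardcodes it based on a trace-refs probe. A quick check of what each side evaluates to, assuming a common 64-bit CPython build:

import sys

print(object().__sizeof__())       # 16: refcount + type pointer on 64-bit
print(hasattr(sys, "getobjects"))  # False unless CPython was built with Py_TRACE_REFS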


@ -492,20 +492,20 @@ class Environment(object):
if func is None:
fail_for_missing_callable("no filter named %r", name)
args = [value] + list(args or ())
if getattr(func, "contextfilter", False) is True:
if getattr(func, "contextfilter", False):
if context is None:
raise TemplateRuntimeError(
"Attempted to invoke context filter without context"
)
args.insert(0, context)
elif getattr(func, "evalcontextfilter", False) is True:
elif getattr(func, "evalcontextfilter", False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, "environmentfilter", False) is True:
elif getattr(func, "environmentfilter", False):
args.insert(0, self)
return func(*args, **(kwargs or {}))


@ -761,7 +761,7 @@ def do_wordwrap(
def do_wordcount(s):
"""Count the words in that string."""
return len(_word_re.findall(soft_unicode(s)))
return len(_word_re.findall(s))
def do_int(value, default=0, base=10):
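The ``wordcount`` change only shows up for undefined input. A minimal sketch, hedged to 2.11-era behavior:

from jinja2 import Environment, StrictUndefined

env = Environment(undefined=StrictUndefined)
print(env.from_string("{{ text | wordcount }}").render(text="two words"))  # 2
# With the 2.11.2 soft_unicode() wrapping, an undefined `text` raises
# UndefinedError; the restored 2.11.1 body hands the Undefined object
# straight to re.findall, which fails with a TypeError instead.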

21 third_party/python/Jinja2/src/jinja2/lexer.py vendored

@ -681,8 +681,6 @@ class Lexer(object):
source_length = len(source)
balancing_stack = []
lstrip_unless_re = self.lstrip_unless_re
newlines_stripped = 0
line_starting = True
while 1:
# tokenizer loop
@ -719,9 +717,7 @@ class Lexer(object):
if strip_sign == "-":
# Strip all whitespace between the text and the tag.
stripped = text.rstrip()
newlines_stripped = text[len(stripped) :].count("\n")
groups = (stripped,) + groups[1:]
groups = (text.rstrip(),) + groups[1:]
elif (
# Not marked for preserving whitespace.
strip_sign != "+"
@ -732,11 +728,11 @@ class Lexer(object):
):
# The start of text between the last newline and the tag.
l_pos = text.rfind("\n") + 1
if l_pos > 0 or line_starting:
# If there's only whitespace between the newline and the
# tag, strip it.
if not lstrip_unless_re.search(text, l_pos):
groups = (text[:l_pos],) + groups[1:]
# If there's only whitespace between the newline and the
# tag, strip it.
if not lstrip_unless_re.search(text, l_pos):
groups = (text[:l_pos],) + groups[1:]
for idx, token in enumerate(tokens):
# failure group
@ -762,8 +758,7 @@ class Lexer(object):
data = groups[idx]
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count("\n") + newlines_stripped
newlines_stripped = 0
lineno += data.count("\n")
# strings as tokens are just yielded as-is.
else:
@ -795,8 +790,6 @@ class Lexer(object):
yield lineno, tokens, data
lineno += data.count("\n")
line_starting = m.group()[-1:] == "\n"
# fetch new position into new variable so that we can check
# if there is a internal parsing error which would result
# in an infinite loop


@ -3,9 +3,11 @@
sources.
"""
import os
import pkgutil
import sys
import weakref
from hashlib import sha1
from importlib import import_module
from os import path
from types import ModuleType
@ -215,75 +217,141 @@ class FileSystemLoader(BaseLoader):
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
"""Load templates from a directory in a Python package.
loader = PackageLoader('mypackage', 'views')
:param package_name: Import name of the package that contains the
template directory.
:param package_path: Directory within the imported package that
contains the templates.
:param encoding: Encoding of template files.
If the package path is not given, ``'templates'`` is assumed.
The following example looks up templates in the ``pages`` directory
within the ``project.ui`` package.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
.. code-block:: python
loader = PackageLoader("project.ui", "pages")
Only packages installed as directories (standard pip behavior) or
zip/egg files (less common) are supported. The Python API for
introspecting data in packages is too limited to support other
installation methods the way this loader requires.
There is limited support for :pep:`420` namespace packages. The
template directory is assumed to only be in one namespace
contributor. Zip files contributing to a namespace are not
supported.
.. versionchanged:: 2.11.0
No longer uses ``setuptools`` as a dependency.
.. versionchanged:: 2.11.0
Limited PEP 420 namespace package support.
"""
def __init__(self, package_name, package_path="templates", encoding="utf-8"):
from pkg_resources import DefaultProvider
from pkg_resources import get_provider
from pkg_resources import ResourceManager
if package_path == os.path.curdir:
package_path = ""
elif package_path[:2] == os.path.curdir + os.path.sep:
package_path = package_path[2:]
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
package_path = os.path.normpath(package_path).rstrip(os.path.sep)
self.package_path = package_path
self.package_name = package_name
self.encoding = encoding
# Make sure the package exists. This also makes namespace
# packages work, otherwise get_loader returns None.
import_module(package_name)
self._loader = loader = pkgutil.get_loader(package_name)
# Zip loader's archive attribute points at the zip.
self._archive = getattr(loader, "archive", None)
self._template_root = None
if hasattr(loader, "get_filename"):
# A standard directory package, or a zip package.
self._template_root = os.path.join(
os.path.dirname(loader.get_filename(package_name)), package_path
)
elif hasattr(loader, "_path"):
# A namespace package, limited support. Find the first
# contributor with the template directory.
for root in loader._path:
root = os.path.join(root, package_path)
if os.path.isdir(root):
self._template_root = root
break
if self._template_root is None:
raise ValueError(
"The %r package was not installed in a way that"
" PackageLoader understands." % package_name
)
def get_source(self, environment, template):
pieces = split_template_path(template)
p = "/".join((self.package_path,) + tuple(pieces))
p = os.path.join(self._template_root, *split_template_path(template))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
if self._archive is None:
# Package is a directory.
if not os.path.isfile(p):
raise TemplateNotFound(template)
filename = uptodate = None
with open(p, "rb") as f:
source = f.read()
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
mtime = os.path.getmtime(p)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
def up_to_date():
return os.path.isfile(p) and os.path.getmtime(p) == mtime
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
else:
# Package is a zip file.
try:
source = self._loader.get_data(p)
except OSError:
raise TemplateNotFound(template)
# Could use the zip's mtime for all template mtimes, but
# would need to safely reload the module if it's out of
# date, so just report it as always current.
up_to_date = None
return source.decode(self.encoding), p, up_to_date
def list_templates(self):
path = self.package_path
if path[:2] == "./":
path = path[2:]
elif path == ".":
path = ""
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + "/" + filename
if self._archive is None:
# Package is a directory.
offset = len(self._template_root)
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip("/"))
for dirpath, _, filenames in os.walk(self._template_root):
dirpath = dirpath[offset:].lstrip(os.path.sep)
results.extend(
os.path.join(dirpath, name).replace(os.path.sep, "/")
for name in filenames
)
else:
if not hasattr(self._loader, "_files"):
raise TypeError(
"This zip import does not have the required"
" metadata to list templates."
)
# Package is a zip file.
prefix = (
self._template_root[len(self._archive) :].lstrip(os.path.sep)
+ os.path.sep
)
offset = len(prefix)
for name in self._loader._files.keys():
# Find names under the templates directory that aren't directories.
if name.startswith(prefix) and name[-1] != os.path.sep:
results.append(name[offset:].replace(os.path.sep, "/"))
_walk(path)
results.sort()
return results
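A minimal usage sketch for the loader above; the package name and template path are hypothetical, assuming a package installed as a plain directory:

from jinja2 import Environment, PackageLoader

# Assumed layout:
#   project/ui/__init__.py
#   project/ui/pages/index.html
env = Environment(loader=PackageLoader("project.ui", "pages"))
print(env.get_template("index.html").render(title="Hello"))
print(env.loader.list_templates())  # ['index.html'] under the assumed layout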


@ -1,3 +1,4 @@
import types
from ast import literal_eval
from itertools import chain
from itertools import islice
@ -10,7 +11,7 @@ from .environment import Environment
from .environment import Template
def native_concat(nodes):
def native_concat(nodes, preserve_quotes=True):
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
@ -18,6 +19,9 @@ def native_concat(nodes):
the string is returned.
:param nodes: Iterable of nodes to concatenate.
:param preserve_quotes: Whether to re-wrap literal strings with
quotes, to preserve quotes around expressions for later parsing.
Should be ``False`` in :meth:`NativeEnvironment.render`.
"""
head = list(islice(nodes, 2))
@ -27,17 +31,29 @@ def native_concat(nodes):
if len(head) == 1:
raw = head[0]
else:
raw = u"".join([text_type(v) for v in chain(head, nodes)])
if isinstance(nodes, types.GeneratorType):
nodes = chain(head, nodes)
raw = u"".join([text_type(v) for v in nodes])
try:
return literal_eval(raw)
literal = literal_eval(raw)
except (ValueError, SyntaxError, MemoryError):
return raw
# If literal_eval returned a string, re-wrap with the original
# quote character to avoid dropping quotes between expression nodes.
# Without this, "'{{ a }}', '{{ b }}'" results in "a, b", but should
# be ('a', 'b').
if preserve_quotes and isinstance(literal, str):
return "{quote}{}{quote}".format(literal, quote=raw[0])
return literal
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
``to_string()`` around output nodes.
``to_string()`` around output nodes, and using :func:`native_concat`
to convert complex strings back to Python types if possible.
"""
@staticmethod
@ -45,7 +61,7 @@ class NativeCodeGenerator(CodeGenerator):
return value
def _output_const_repr(self, group):
return repr(u"".join([text_type(v) for v in group]))
return repr(native_concat(group))
def _output_child_to_const(self, node, frame, finalize):
const = node.as_const(frame.eval_ctx)
@ -84,9 +100,10 @@ class NativeTemplate(Template):
Otherwise, the string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
return native_concat(
self.root_render_func(self.new_context(vars)), preserve_quotes=False
)
except Exception:
return self.environment.handle_exception()
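To see what ``native_concat`` does with the rendered pieces, a small sketch that behaves the same on both versions being swapped here:

from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment()
# The rendered string "1, 2" is fed through ast.literal_eval, so the
# template produces a real tuple rather than text.
result = env.from_string("{{ a }}, {{ b }}").render(a=1, b=2)
print(result, type(result))  # (1, 2) <class 'tuple'>

The ``preserve_quotes`` machinery removed above exists for the string case this sketch avoids: without re-wrapping, literal string nodes lose their quotes before the final ``literal_eval``.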


@ -671,7 +671,7 @@ class Filter(Expr):
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, "contextfilter", False) is True:
if filter_ is None or getattr(filter_, "contextfilter", False):
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
@ -684,9 +684,9 @@ class Filter(Expr):
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
if getattr(filter_, "evalcontextfilter", False) is True:
if getattr(filter_, "evalcontextfilter", False):
args.insert(0, eval_ctx)
elif getattr(filter_, "environmentfilter", False) is True:
elif getattr(filter_, "environmentfilter", False):
args.insert(0, self.environment)
try:


@ -280,11 +280,11 @@ class Context(with_metaclass(ContextMeta)):
break
if callable(__obj):
if getattr(__obj, "contextfunction", False) is True:
if getattr(__obj, "contextfunction", 0):
args = (__self,) + args
elif getattr(__obj, "evalcontextfunction", False) is True:
elif getattr(__obj, "evalcontextfunction", 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, "environmentfunction", False) is True:
elif getattr(__obj, "environmentfunction", 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
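The same marker-attribute pattern from the filter paths applies here to callables in templates. A minimal sketch of a ``contextfunction`` (2.11-era decorator assumed):

from jinja2 import Environment, contextfunction

@contextfunction
def resolve_upper(context, key):
    # marked with .contextfunction = True, so Context.call (above)
    # prepends the active context before invoking the function
    return str(context.resolve(key)).upper()

env = Environment()
env.globals["resolve_upper"] = resolve_upper
print(env.from_string("{{ resolve_upper('name') }}").render(name="world"))  # WORLD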

13 third_party/python/Jinja2/src/jinja2/utils.py vendored

@ -165,15 +165,11 @@ def object_type_repr(obj):
return "None"
elif obj is Ellipsis:
return "Ellipsis"
cls = type(obj)
# __builtin__ in 2.x, builtins in 3.x
if cls.__module__ in ("__builtin__", "builtins"):
name = cls.__name__
if obj.__class__.__module__ in ("__builtin__", "builtins"):
name = obj.__class__.__name__
else:
name = cls.__module__ + "." + cls.__name__
name = obj.__class__.__module__ + "." + obj.__class__.__name__
return "%s object" % name
@ -697,8 +693,7 @@ class Namespace(object):
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
# __class__ is needed for the awaitable check in async mode
if name in {"_Namespace__attrs", "__class__"}:
if name == "_Namespace__attrs":
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
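The object behind this ``__getattribute__`` dance is the template-level ``namespace()`` helper, which exists so ``{% set %}`` can mutate state across loop scopes:

from jinja2 import Environment

tmpl = Environment().from_string(
    "{% set ns = namespace(total=0) %}"
    "{% for i in [1, 2, 3] %}{% set ns.total = ns.total + i %}{% endfor %}"
    "{{ ns.total }}"
)
print(tmpl.render())  # 6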

19 third_party/python/MarkupSafe/docs/Makefile vendored Normal file

@ -0,0 +1,19 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

4 third_party/python/MarkupSafe/docs/changes.rst vendored Normal file

@ -0,0 +1,4 @@
Changes
=======
.. include:: ../CHANGES.rst

42 third_party/python/MarkupSafe/docs/conf.py vendored Normal file

@ -0,0 +1,42 @@
from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink
# Project --------------------------------------------------------------
project = "MarkupSafe"
copyright = "2010 Pallets Team"
author = "Pallets Team"
release, version = get_version("MarkupSafe")
# General --------------------------------------------------------------
master_doc = "index"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "pallets_sphinx_themes"]
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
# HTML -----------------------------------------------------------------
html_theme = "flask"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
"project_links": [
ProjectLink("Donate to Pallets", "https://palletsprojects.com/donate"),
ProjectLink("Website", "https://palletsprojects.com/p/markupsafe/"),
ProjectLink("PyPI releases", "https://pypi.org/project/MarkupSafe/"),
ProjectLink("Source Code", "https://github.com/pallets/markupsafe/"),
ProjectLink("Issue Tracker", "https://github.com/pallets/markupsafe/issues/"),
]
}
html_sidebars = {
"index": ["project.html", "localtoc.html", "searchbox.html"],
"**": ["localtoc.html", "relations.html", "searchbox.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
html_title = "MarkupSafe Documentation ({})".format(version)
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [
(master_doc, "MarkupSafe-{}.tex".format(version), html_title, author, "manual")
]

21 third_party/python/MarkupSafe/docs/escaping.rst vendored Normal file

@ -0,0 +1,21 @@
.. module:: markupsafe
Working With Safe Text
======================
.. autofunction:: escape
.. autoclass:: Markup
:members: escape, unescape, striptags
Optional Values
---------------
.. autofunction:: escape_silent
Convert an Object to a String
-----------------------------
.. autofunction:: soft_unicode

77 third_party/python/MarkupSafe/docs/formatting.rst vendored Normal file

@ -0,0 +1,77 @@
.. currentmodule:: markupsafe
String Formatting
=================
The :class:`Markup` class can be used as a format string. Objects
formatted into a markup string will be escaped first.
Format Method
-------------
The ``format`` method extends the standard :meth:`str.format` behavior
to use an ``__html_format__`` method.
#. If an object has an ``__html_format__`` method, it is called as a
replacement for the ``__format__`` method. It is passed a format
specifier if it's given. The method must return a string or
:class:`Markup` instance.
#. If an object has an ``__html__`` method, it is called. If a format
specifier was passed and the class defined ``__html__`` but not
``__html_format__``, a ``ValueError`` is raised.
#. Otherwise Python's default format behavior is used and the result
is escaped.
For example, to implement a ``User`` that wraps its ``name`` in a
``span`` tag, and adds a link when using the ``'link'`` format
specifier:
.. code-block:: python
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup(
'<a href="/user/{}">{}</a>'
).format(self.id, self.__html__())
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup(
'<span class="user">{0}</span>'
).format(self.name)
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> escape(user)
Markup('<span class="user">&lt;script&gt;</span>')
>>> Markup('<p>User: {user:link}').format(user=user)
Markup('<p>User: <a href="/user/3"><span class="user">&lt;script&gt;</span></a>')
See Python's docs on :ref:`format string syntax <python:formatstrings>`.
printf-style Formatting
-----------------------
Besides escaping, there's no special behavior involved with percent
formatting.
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> Markup('<a href="/user/%d">%s</a>') % (user.id, user.name)
Markup('<a href="/user/3">&lt;script&gt;</a>')
See Python's docs on :ref:`printf-style formatting <python:old-string-formatting>`.

51 third_party/python/MarkupSafe/docs/html.rst vendored Normal file

@ -0,0 +1,51 @@
.. currentmodule:: markupsafe
HTML Representations
====================
In many frameworks, if a class implements an ``__html__`` method it
will be used to get the object's representation in HTML. MarkupSafe's
:func:`escape` function and :class:`Markup` class understand and
implement this method. If an object has an ``__html__`` method it will
be called rather than converting the object to a string, and the result
will be assumed safe and not escaped.
For example, an ``Image`` class might automatically generate an
``<img>`` tag:
.. code-block:: python
class Image:
def __init__(self, url):
self.url = url
def __html__(self):
return '<img src="%s">' % self.url
.. code-block:: pycon
>>> img = Image('/static/logo.png')
>>> Markup(img)
Markup('<img src="/static/logo.png">')
Since this bypasses escaping, you need to be careful about using
user-provided data in the output. For example, a user's display name
should still be escaped:
.. code-block:: python
class User:
def __init__(self, id, name):
self.id = id
self.name = name
def __html__(self):
return '<a href="/user/{}">{}</a>'.format(
self.id, escape(self.name)
)
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> escape(user)
Markup('<a href="/user/3">&lt;script&gt;</a>')

53 third_party/python/MarkupSafe/docs/index.rst vendored Normal file

@ -0,0 +1,53 @@
.. currentmodule:: markupsafe
MarkupSafe
==========
MarkupSafe escapes characters so text is safe to use in HTML and XML.
Characters that have special meanings are replaced so that they display
as the actual characters. This mitigates injection attacks, meaning
untrusted user input can safely be displayed on a page.
The :func:`escape` function escapes text and returns a :class:`Markup`
object. The object won't be escaped anymore, but any text that is used
with it will be, ensuring that the result remains safe to use in HTML.
>>> from markupsafe import escape
>>> hello = escape('<em>Hello</em>')
>>> hello
Markup('&lt;em&gt;Hello&lt;/em&gt;')
>>> escape(hello)
Markup('&lt;em&gt;Hello&lt;/em&gt;')
>>> hello + ' <strong>World</strong>'
Markup('&lt;em&gt;Hello&lt;/em&gt; &lt;strong&gt;World&lt;/strong&gt;')
.. note::
The docs assume you're using Python 3. The terms "text" and "string"
refer to the :class:`str` class. In Python 2, this would be the
``unicode`` class instead.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Table of Contents
-----------------
.. toctree::
:maxdepth: 2
escaping
html
formatting
license
changes

4 third_party/python/MarkupSafe/docs/license.rst vendored Normal file

@ -0,0 +1,4 @@
License
=======
.. include:: ../LICENSE.rst

35 third_party/python/MarkupSafe/docs/make.bat vendored Normal file

@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

2 third_party/python/MarkupSafe/docs/requirements.txt vendored Normal file

@ -0,0 +1,2 @@
Sphinx~=1.8.0
Pallets-Sphinx-Themes~=1.1.0

8 third_party/python/appdirs/.gitignore vendored

@ -1,8 +0,0 @@
*.pyc
*.egg-info
tmp/
build/
dist/
.tox/
MANIFEST
*.komodoproject

10 third_party/python/appdirs/.travis.yml vendored

@ -1,10 +0,0 @@
language: python
python:
- "2.7"
- "pypy"
- "3.4"
- "3.5"
- "3.6"
- "3.7"
- "3.8"
script: python setup.py test

9 third_party/python/appdirs/CHANGES.rst vendored

@ -1,15 +1,6 @@
appdirs Changelog
=================
appdirs 1.4.4
-------------
- [PR #92] Don't import appdirs from setup.py
Project officially classified as Stable which is important
for inclusion in other distros such as ActivePython.
First of several incremental releases to catch up on maintenance.
appdirs 1.4.3
-------------
- [PR #76] Python 3.6 invalid escape sequence deprecation fixes

13 third_party/python/appdirs/Dockerfile vendored

@ -1,13 +0,0 @@
FROM activestate/activepython:2.7
# For Python 3 compat
RUN apt-get -y update && apt-get -y install python3-setuptools && \
apt-get -y clean
WORKDIR /app
ADD . /app
RUN python setup.py install && python setup.py test
RUN python3 setup.py install && python3 setup.py test
RUN python -m appdirs
RUN python3 -m appdirs

16 third_party/python/appdirs/HACKING.md vendored

@ -1,16 +0,0 @@
# HACKING
## release
ensure correct version in CHANGES.md and appdirs.py, and:
```
python setup.py register sdist bdist_wheel upload
```
## docker image
```
docker build -t appdirs .
```

26 third_party/python/appdirs/PKG-INFO vendored

@ -1,12 +1,10 @@
Metadata-Version: 1.2
Metadata-Version: 1.1
Name: appdirs
Version: 1.4.4
Version: 1.4.3
Summary: A small Python module for determining appropriate platform-specific dirs, e.g. a "user data dir".
Home-page: http://github.com/ActiveState/appdirs
Author: Trent Mick
Author-email: trentm@gmail.com
Maintainer: Jeff Rouse
Maintainer-email: jr@its.to
Author: Trent Mick; Sridhar Ratnakumar; Jeff Rouse
Author-email: trentm@gmail.com; github@srid.name; jr@its.to
License: MIT
Description:
.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
@ -152,15 +150,6 @@ Description:
appdirs Changelog
=================
appdirs 1.4.4
-------------
- [PR #92] Don't import appdirs from setup.py
Project officially classified as Stable which is important
for inclusion in other distros such as ActivePython.
First of several incremental releases to catch up on maintenance.
appdirs 1.4.3
-------------
- [PR #76] Python 3.6 invalid escape sequence deprecation fixes
@ -245,18 +234,19 @@ Description:
Keywords: application directory log cache user
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development :: Libraries :: Python Modules

1 third_party/python/appdirs/TODO.md vendored

@ -1 +0,0 @@
- add some Windows 7 examples

4 third_party/python/appdirs/appdirs.py vendored

@ -13,8 +13,8 @@ See <http://github.com/ActiveState/appdirs> for details and usage.
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
import sys
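For context on the module whose version is being rolled back, typical appdirs use looks like this (the app and author names are placeholders; exact paths vary by OS):

import appdirs

print(appdirs.user_data_dir("MyApp", "MyCompany"))   # per-user data directory
print(appdirs.user_cache_dir("MyApp", "MyCompany"))  # per-user cache directory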

22 third_party/python/appdirs/setup.py vendored

@ -7,7 +7,7 @@ try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import ast
import appdirs
tests_require = []
if sys.version_info < (2, 7):
@ -21,32 +21,26 @@ def read(fname):
return out
# Do not import `appdirs` yet, lest we import some random version on sys.path.
for line in read("appdirs.py").splitlines():
if line.startswith("__version__"):
version = ast.literal_eval(line.split("=", 1)[1].strip())
break
setup(
name='appdirs',
version=version,
version=appdirs.__version__,
description='A small Python module for determining appropriate ' + \
'platform-specific dirs, e.g. a "user data dir".',
long_description=read('README.rst') + '\n' + read('CHANGES.rst'),
classifiers=[c.strip() for c in """
Development Status :: 5 - Production/Stable
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: PyPy
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development :: Libraries :: Python Modules
@ -56,8 +50,8 @@ setup(
keywords='application directory log cache user',
author='Trent Mick',
author_email='trentm@gmail.com',
maintainer='Jeff Rouse',
maintainer_email='jr@its.to',
maintainer='Trent Mick; Sridhar Ratnakumar; Jeff Rouse',
maintainer_email='trentm@gmail.com; github@srid.name; jr@its.to',
url='http://github.com/ActiveState/appdirs',
license='MIT',
py_modules=["appdirs"],

5 third_party/python/appdirs/tox.ini vendored

@ -1,5 +0,0 @@
[tox]
envlist = py26, py27, py32, py33, py34, py35, py36
[testenv]
commands = python setup.py test


@ -1,36 +1,17 @@
version: 2.1
commands:
test-start:
steps:
- checkout
- run:
name: environment
command: |
echo 'export PATH=.:$HOME/.local/bin:$PATH' >> $BASH_ENV
test-min-requirements:
steps:
- run:
name: install minimum requirements
command: |
# Use requirements-builder to determine the minimum versions of
# all requirements and test those
# We install requirements-builder itself into its own venv, since
# otherwise its dependencies might install newer versions of
# glean_parser's dependencies.
python3 -m venv .rb
.rb/bin/pip install requirements-builder
.rb/bin/requirements-builder --level=min setup.py > min_requirements.txt
pip install --progress-bar off --user -U -r min_requirements.txt
test-python-version:
parameters:
requirements-file:
type: string
default: "requirements_dev.txt"
steps:
- checkout
- run:
name: environment
command: |
echo 'export PATH=.:$HOME/.local/bin:$PATH' >> $BASH_ENV
- run:
name: install
command: |
@ -56,26 +37,23 @@ commands:
command: make test
jobs:
build-35:
docker:
- image: circleci/python:3.5.9
steps:
- test-python-version:
requirements-file: requirements_dev_py35.txt
build-36:
docker:
- image: circleci/python:3.6.9
steps:
- test-start
- test-python-version
build-36-min:
docker:
- image: circleci/python:3.6.9
steps:
- test-start
- test-min-requirements
- test-python-version
build-37:
docker:
- image: circleci/python:3.7.5
steps:
- test-start
- test-python-version
- run:
name: make-docs
@ -90,15 +68,6 @@ jobs:
docker:
- image: circleci/python:3.8.0
steps:
- test-start
- test-python-version
build-38-min:
docker:
- image: circleci/python:3.8.0
steps:
- test-start
- test-min-requirements
- test-python-version
docs-deploy:
@ -146,11 +115,11 @@ workflows:
version: 2
build:
jobs:
- build-36:
- build-35:
filters:
tags:
only: /.*/
- build-36-min:
- build-36:
filters:
tags:
only: /.*/
@ -162,16 +131,12 @@ workflows:
filters:
tags:
only: /.*/
- build-38-min:
filters:
tags:
only: /.*/
- docs-deploy:
requires:
- build-37
filters:
branches:
only: main
only: master
- pypi-deploy:
requires:
- build-37


@ -87,6 +87,10 @@ Ready to contribute? Here's how to set up `glean_parser` for local development.
$ pip install -r requirements_dev.txt
If using Python 3.5:
$ pip install -r requirements_dev_35.txt
Optionally, if you want to ensure that the generated Kotlin code lints correctly, install a Java SDK, and then run::
$ make install-kotlin-linters
@ -113,7 +117,7 @@ Before you submit a pull request, check that it meets these guidelines:
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.6, 3.7 and 3.8 (The CI system will take care of testing all of these Python versions).
3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8 (The CI system will take care of testing all of these Python versions).
4. The pull request should update the changelog in `HISTORY.rst`.
Tips
@ -129,11 +133,11 @@ Deploying
A reminder for the maintainers on how to deploy.
Get a clean main branch with all of the changes from `upstream`::
Get a clean master branch with all of the changes from `upstream`::
$ git checkout main
$ git checkout master
$ git fetch upstream
$ git rebase upstream/main
$ git rebase upstream/master
- Update the header with the new version and date in HISTORY.rst.
@ -143,9 +147,10 @@ Get a clean main branch with all of the changes from `upstream`::
- Push the changes upstream::
$ git push upstream main
$ git push upstream master
- Wait for [continuous integration to pass](https://circleci.com/gh/mozilla/glean/tree/main) on main.
- Wait for [continuous integration to
pass](https://circleci.com/gh/mozilla/glean/tree/master) on master.
- Make the release on GitHub using [this link](https://github.com/mozilla/glean_parser/releases/new)

88 third_party/python/glean_parser/HISTORY.rst vendored

@ -5,94 +5,6 @@ History
Unreleased
----------
1.28.0 (2020-07-23)
-------------------
* **Breaking change:** The internal ping `deletion-request` was misnamed in pings.py causing the linter to not allow use of the correctly named ping for adding legacy ids to. Consuming apps will need to update their metrics.yaml if they are using `deletion_request` in any `send_in_pings` to `deletion-request` after updating.
1.27.0 (2020-07-21)
-------------------
* Rename the `data_category` field to `data_sensitivity` to be clearer.
1.26.0 (2020-07-21)
-------------------
* Add support for JWE metric types.
* Add a `data_sensitivity` field to all metrics for specifying the type of data collected in the field.
1.25.0 (2020-07-17)
-------------------
* Add support for generating C# code.
* BUGFIX: The memory unit is now correctly passed to the MemoryDistribution
metric type in Swift.
1.24.0 (2020-06-30)
-------------------
* BUGFIX: look for metrics in send_if_empty pings. Metrics for these kinds of pings were being ignored.
1.23.0 (2020-06-27)
-------------------
* Support for Python 3.5 has been dropped.
* BUGFIX: The ordering of event extra keys will now match with their enum, fixing a serious bug where keys of extras may not match the correct values in the data payload. See https://bugzilla.mozilla.org/show_bug.cgi?id=1648768.
1.22.0 (2020-05-28)
-------------------
* **Breaking change:** (Swift only) Combine all metrics and pings into a single generated file `Metrics.swift`.
1.21.0 (2020-05-25)
-------------------
* `glinter` messages have been improved with more details and to be more
actionable.
* A maximum of 10 `extra_keys` is now enforced for `event` metric types.
* BUGFIX: the `Lifetime` enum values now match the values of the implementation in mozilla/glean.
1.20.4 (2020-05-07)
-------------------
* BUGFIX: yamllint errors are now reported using the correct file name.
1.20.3 (2020-05-06)
-------------------
* Support for using `timing_distribution`'s `time_unit` parameter to control the range of acceptable values is documented. The default unit for this use case is `nanosecond` to avoid creating a breaking change. See [bug 1630997](https://bugzilla.mozilla.org/show_bug.cgi?id=1630997) for more information.
1.20.2 (2020-04-24)
-------------------
* Dependencies that depend on the version of Python being used are now specified using the `Declaring platform specific dependencies syntax in setuptools <https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies>`__. This means that more recent versions of dependencies are likely to be installed on Python 3.6 and later, and unnecessary backport libraries won't be installed on more recent Python versions.
1.20.1 (2020-04-21)
-------------------
* The minimum version of the runtime dependencies has been lowered to increase compatibility with other tools. These minimum versions are now tested in CI, in addition to testing the latest versions of the dependencies that was already happening in CI.
1.20.0 (2020-04-15)
-------------------
* **Breaking change:** glinter errors found during the `translate` command will now return an error code. glinter warnings will be displayed, but not return an error code.
* `glean_parser` now produces a linter warning when `user` lifetime metrics are
set to expire. See [bug 1604854](https://bugzilla.mozilla.org/show_bug.cgi?id=1604854)
for additional context.
1.19.0 (2020-03-18)
-------------------
* **Breaking change:** The regular expression used to validate labels is
stricter and more correct.
* Add more information about pings to markdown documentation:
* State whether the ping includes client id;
* Add list of data review links;
* Add list of related bugs links.
* `glean_parser` now makes it easier to write external translation functions for
different language targets.
* BUGFIX: glean_parser now works on 32-bit Windows.
1.18.3 (2020-02-24)
-------------------

7 third_party/python/glean_parser/Makefile vendored

@ -21,7 +21,7 @@ clean-build: ## remove build artifacts
rm -fr dist/
rm -fr .eggs/
find . -name '*.egg-info' -exec rm -fr {} +
find . -name '*.egg' -exec rm -fr {} +
find . -name '*.egg' -exec rm -f {} +
clean-pyc: ## remove Python file artifacts
find . -name '*.pyc' -exec rm -f {} +
@ -36,11 +36,10 @@ clean-test: ## remove test and coverage artifacts
lint: ## check style with flake8
python3 -m flake8 glean_parser tests
if python3 --version | grep 'Python 3\.[678]\..*'; then \
bash -c 'if [[ `python3 --version` =~ "Python 3\.[678]\..*" ]]; then \
python3 -m black --check glean_parser tests setup.py; \
fi
fi'
python3 -m yamllint glean_parser tests
python3 -m mypy glean_parser
test: ## run tests quickly with the default Python
py.test

99 third_party/python/glean_parser/PKG-INFO vendored

@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: glean_parser
Version: 1.28.0
Version: 1.18.3
Summary: Parser tools for Mozilla's Glean telemetry
Home-page: https://github.com/mozilla/glean_parser
Author: Michael Droettboom
@ -26,7 +26,7 @@ Description: ============
Requirements
------------
- Python 3.6 (or later)
- Python 3.5 (or later)
The following library requirements are installed automatically when glean_parser
is installed by `pip`.
@ -38,10 +38,14 @@ Description: ============
- jsonschema
- PyYAML
Additionally on Python 3.6:
Additionally on Python 3.6 and 3.5:
- iso8601
And on Python 3.5:
- pep487
Usage
-----
@ -69,94 +73,6 @@ Description: ============
Unreleased
----------
1.28.0 (2020-07-23)
-------------------
* **Breaking change:** The internal ping `deletion-request` was misnamed in pings.py causing the linter to not allow use of the correctly named ping for adding legacy ids to. Consuming apps will need to update their metrics.yaml if they are using `deletion_request` in any `send_in_pings` to `deletion-request` after updating.
1.27.0 (2020-07-21)
-------------------
* Rename the `data_category` field to `data_sensitivity` to be clearer.
1.26.0 (2020-07-21)
-------------------
* Add support for JWE metric types.
* Add a `data_sensitivity` field to all metrics for specifying the type of data collected in the field.
1.25.0 (2020-07-17)
-------------------
* Add support for generating C# code.
* BUGFIX: The memory unit is now correctly passed to the MemoryDistribution
metric type in Swift.
1.24.0 (2020-06-30)
-------------------
* BUGFIX: look for metrics in send_if_empty pings. Metrics for these kinds of pings were being ignored.
1.23.0 (2020-06-27)
-------------------
* Support for Python 3.5 has been dropped.
* BUGFIX: The ordering of event extra keys will now match with their enum, fixing a serious bug where keys of extras may not match the correct values in the data payload. See https://bugzilla.mozilla.org/show_bug.cgi?id=1648768.
1.22.0 (2020-05-28)
-------------------
* **Breaking change:** (Swift only) Combine all metrics and pings into a single generated file `Metrics.swift`.
1.21.0 (2020-05-25)
-------------------
* `glinter` messages have been improved with more details and to be more
actionable.
* A maximum of 10 `extra_keys` is now enforced for `event` metric types.
* BUGFIX: the `Lifetime` enum values now match the values of the implementation in mozilla/glean.
1.20.4 (2020-05-07)
-------------------
* BUGFIX: yamllint errors are now reported using the correct file name.
1.20.3 (2020-05-06)
-------------------
* Support for using `timing_distribution`'s `time_unit` parameter to control the range of acceptable values is documented. The default unit for this use case is `nanosecond` to avoid creating a breaking change. See [bug 1630997](https://bugzilla.mozilla.org/show_bug.cgi?id=1630997) for more information.
1.20.2 (2020-04-24)
-------------------
* Dependencies that depend on the version of Python being used are now specified using the `Declaring platform specific dependencies syntax in setuptools <https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies>`__. This means that more recent versions of dependencies are likely to be installed on Python 3.6 and later, and unnecessary backport libraries won't be installed on more recent Python versions.
1.20.1 (2020-04-21)
-------------------
* The minimum version of the runtime dependencies has been lowered to increase compatibility with other tools. These minimum versions are now tested in CI, in addition to testing the latest versions of the dependencies that was already happening in CI.
1.20.0 (2020-04-15)
-------------------
* **Breaking change:** glinter errors found during the `translate` command will now return an error code. glinter warnings will be displayed, but not return an error code.
* `glean_parser` now produces a linter warning when `user` lifetime metrics are
set to expire. See [bug 1604854](https://bugzilla.mozilla.org/show_bug.cgi?id=1604854)
for additional context.
1.19.0 (2020-03-18)
-------------------
* **Breaking change:** The regular expression used to validate labels is
stricter and more correct.
* Add more information about pings to markdown documentation:
* State whether the ping includes client id;
* Add list of data review links;
* Add list of related bugs links.
* `glean_parser` now makes it easier to write external translation functions for
different language targets.
* BUGFIX: glean_parser now works on 32-bit Windows.
1.18.3 (2020-02-24)
-------------------
@ -437,6 +353,7 @@ Classifier: Development Status :: 2 - Pre-Alpha
Classifier: Intended Audience :: Developers
Classifier: Natural Language :: English
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8

8 third_party/python/glean_parser/README.rst vendored

@ -18,7 +18,7 @@ The full documentation is available `here <https://mozilla.github.io/glean_parse
Requirements
------------
- Python 3.6 (or later)
- Python 3.5 (or later)
The following library requirements are installed automatically when glean_parser
is installed by `pip`.
@ -30,10 +30,14 @@ is installed by `pip`.
- jsonschema
- PyYAML
Additionally on Python 3.6:
Additionally on Python 3.6 and 3.5:
- iso8601
And on Python 3.5:
- pep487
Usage
-----


@ -1,148 +0,0 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Outputter to generate C# code for metrics.
"""
import enum
import json
from pathlib import Path
from typing import Any, Dict, List, Union # noqa
from . import metrics
from . import pings
from . import util
def csharp_datatypes_filter(value: util.JSONType) -> str:
"""
A Jinja2 filter that renders C# literals.
Based on Python's JSONEncoder, but overrides:
- lists to use `new string[] {}` (only strings)
- dicts to use mapOf (not currently supported)
- sets to use setOf (not currently supported)
- enums to use the like-named C# enum
"""
class CSharpEncoder(json.JSONEncoder):
def iterencode(self, value):
if isinstance(value, list):
assert all(isinstance(x, str) for x in value)
yield "new string[] {"
first = True
for subvalue in value:
if not first:
yield ", "
yield from self.iterencode(subvalue)
first = False
yield "}"
elif isinstance(value, dict):
yield "mapOf("
first = True
for key, subvalue in value.items():
if not first:
yield ", "
yield from self.iterencode(key)
yield " to "
yield from self.iterencode(subvalue)
first = False
yield ")"
elif isinstance(value, enum.Enum):
yield (value.__class__.__name__ + "." + util.Camelize(value.name))
elif isinstance(value, set):
yield "setOf("
first = True
for subvalue in sorted(list(value)):
if not first:
yield ", "
yield from self.iterencode(subvalue)
first = False
yield ")"
else:
yield from super().iterencode(value)
return "".join(CSharpEncoder().iterencode(value))
def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
"""
Returns the C# type to use for a given metric or ping object.
"""
generate_enums = getattr(obj, "_generate_enums", [])
if len(generate_enums):
template_args = []
for member, suffix in generate_enums:
if len(getattr(obj, member)):
template_args.append(util.camelize(obj.name) + suffix)
else:
if suffix == "Keys":
template_args.append("NoExtraKeys")
else:
template_args.append("No" + suffix)
return "{}<{}>".format(class_name(obj.type), ", ".join(template_args))
return class_name(obj.type)
def class_name(obj_type: str) -> str:
"""
Returns the C# class name for a given metric or ping type.
"""
if obj_type == "ping":
return "PingType"
if obj_type.startswith("labeled_"):
obj_type = obj_type[8:]
return util.Camelize(obj_type) + "MetricType"
def output_csharp(
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, output C# code to `output_dir`.
:param objects: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
- `namespace`: The package namespace to declare at the top of the
generated files. Defaults to `GleanMetrics`.
- `glean_namespace`: The package namespace of the glean library itself.
This is where glean objects will be imported from in the generated
code.
"""
template = util.get_jinja2_template(
"csharp.jinja2",
filters=(
("csharp", csharp_datatypes_filter),
("type_name", type_name),
("class_name", class_name),
),
)
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "Mozilla.Glean")
for category_key, category_val in objs.items():
filename = util.Camelize(category_key) + ".cs"
filepath = output_dir / filename
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
category_name=category_key,
objs=category_val,
extra_args=util.extra_args,
namespace=namespace,
glean_namespace=glean_namespace,
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")


@ -11,15 +11,11 @@ Outputter to generate Kotlin code for metrics.
from collections import OrderedDict
import enum
import json
from pathlib import Path
from typing import Any, Dict, List, Union # noqa
from . import metrics
from . import pings
from . import util
def kotlin_datatypes_filter(value: util.JSONType) -> str:
def kotlin_datatypes_filter(value):
"""
A Jinja2 filter that renders Kotlin literals.
@ -69,7 +65,7 @@ def kotlin_datatypes_filter(value: util.JSONType) -> str:
return "".join(KotlinEncoder().iterencode(value))
def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
def type_name(obj):
"""
Returns the Kotlin type to use for a given metric or ping object.
"""
@ -90,7 +86,7 @@ def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
return class_name(obj.type)
def class_name(obj_type: str) -> str:
def class_name(obj_type):
"""
Returns the Kotlin class name for a given metric or ping type.
"""
@ -101,15 +97,13 @@ def class_name(obj_type: str) -> str:
return util.Camelize(obj_type) + "MetricType"
def output_gecko_lookup(
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
def output_gecko_lookup(objs, output_dir, options={}):
"""
Given a tree of objects, generate a Kotlin map between Gecko histograms and
Glean SDK metric types.
:param objects: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
@ -144,9 +138,7 @@ def output_gecko_lookup(
# },
# "other-type": {}
# }
gecko_metrics: OrderedDict[
str, OrderedDict[str, List[Dict[str, str]]]
] = OrderedDict()
gecko_metrics = OrderedDict()
# Define scalar-like types.
SCALAR_LIKE_TYPES = ["boolean", "string", "quantity"]
@ -156,9 +148,7 @@ def output_gecko_lookup(
# Glean SDK and GeckoView. See bug 1566356 for more context.
for metric in category_val.values():
# This is not a Gecko metric, skip it.
if isinstance(metric, pings.Ping) or not getattr(
metric, "gecko_datapoint", False
):
if not getattr(metric, "gecko_datapoint", False):
continue
# Put scalars in their own categories, histogram-like in "histograms" and
@ -196,14 +186,12 @@ def output_gecko_lookup(
fd.write("\n")
def output_kotlin(
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
def output_kotlin(objs, output_dir, options={}):
"""
Given a tree of objects, output Kotlin code to `output_dir`.
:param objects: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
@ -222,6 +210,25 @@ def output_kotlin(
),
)
# The object parameters to pass to constructors
extra_args = [
"allowed_extra_keys",
"bucket_count",
"category",
"disabled",
"histogram_type",
"include_client_id",
"send_if_empty",
"lifetime",
"memory_unit",
"name",
"range_max",
"range_min",
"reason_codes",
"send_in_pings",
"time_unit",
]
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
@ -242,7 +249,7 @@ def output_kotlin(
category_name=category_key,
objs=category_val,
obj_types=obj_types,
extra_args=util.extra_args,
extra_args=extra_args,
namespace=namespace,
has_labeled_metrics=has_labeled_metrics,
glean_namespace=glean_namespace,

View file

@ -3,53 +3,25 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import enum
from pathlib import Path
import re
import sys
from typing import Any, Callable, Dict, Generator, List, Iterable, Tuple, Union # noqa
from . import metrics
from . import parser
from . import pings
from . import util
from yamllint.config import YamlLintConfig # type: ignore
from yamllint import linter # type: ignore
from yamllint.config import YamlLintConfig
from yamllint import linter
LintGenerator = Generator[str, None, None]
class CheckType(enum.Enum):
warning = 0
error = 1
def _split_words(name: str) -> List[str]:
def _split_words(name):
"""
Helper function to split words on either `.` or `_`.
"""
return re.split("[._]", name)
def _english_list(items: List[str]) -> str:
"""
Helper function to format a list [A, B, C] as "'A', 'B', or 'C'".
"""
if len(items) == 0:
return ""
elif len(items) == 1:
return f"'{items[0]}'"
else:
return "{}, or '{}'".format(
", ".join([f"'{x}'" for x in items[:-1]]), items[-1]
)
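A few worked examples of this helper; the expected outputs follow directly from the code above:

assert _english_list([]) == ""
assert _english_list(["a"]) == "'a'"
assert _english_list(["a", "b", "c"]) == "'a', 'b', or 'c'"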
def _hamming_distance(str1: str, str2: str) -> int:
def _hamming_distance(str1, str2):
"""
Count the # of differences between strings str1 and str2,
padding the shorter one with whitespace
@ -67,9 +39,7 @@ def _hamming_distance(str1: str, str2: str) -> int:
return diffs
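The function body is elided by the hunk; a minimal sketch matching the docstring (padding the shorter string with whitespace) could look like this:

from itertools import zip_longest

def _hamming_distance_sketch(str1, str2):
    # Count positions where the strings differ; the shorter input is
    # padded with spaces, so trailing extra characters count as diffs.
    return sum(a != b for a, b in zip_longest(str1, str2, fillvalue=" "))

assert _hamming_distance_sketch("metrics", "metricz") == 1
assert _hamming_distance_sketch("event", "events") == 1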
def check_common_prefix(
category_name: str, metrics: Iterable[metrics.Metric]
) -> LintGenerator:
def check_common_prefix(category_name, metrics):
"""
Check if all metrics begin with a common prefix.
"""
@ -88,16 +58,12 @@ def check_common_prefix(
if i > 0:
common_prefix = "_".join(first[:i])
yield (
f"Within category '{category_name}', all metrics begin with "
f"prefix '{common_prefix}'."
"Remove the prefixes on the metric names and (possibly) "
"rename the category."
)
"Within category '{}', all metrics begin with prefix "
"'{}'. Remove prefixes and (possibly) rename category."
).format(category_name, common_prefix)
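The check looks for a run of leading words shared by every metric name in the category; a compact sketch of that idea (not the exact implementation, which the hunk truncates):

import os

def _common_prefix_words_sketch(names):
    # os.path.commonprefix compares element-wise over any sequences, so
    # splitting each name into words yields the shared leading words.
    return os.path.commonprefix([_split_words(name) for name in names])

assert _common_prefix_words_sketch(["page_load_ms", "page_size"]) == ["page"]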
def check_unit_in_name(
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
def check_unit_in_name(metric, parser_config={}):
"""
The metric name ends in a unit.
"""
@ -121,160 +87,105 @@ def check_unit_in_name(
name_words = _split_words(metric.name)
unit_in_name = name_words[-1]
time_unit = getattr(metric, "time_unit", None)
memory_unit = getattr(metric, "memory_unit", None)
unit = getattr(metric, "unit", None)
if time_unit is not None:
if hasattr(metric, "time_unit"):
if (
unit_in_name == TIME_UNIT_ABBREV.get(time_unit.name)
or unit_in_name == time_unit.name
unit_in_name == TIME_UNIT_ABBREV.get(metric.time_unit.name)
or unit_in_name == metric.time_unit.name
):
yield (
f"Suffix '{unit_in_name}' is redundant with time_unit "
f"'{time_unit.name}'. Only include time_unit."
)
"Suffix '{}' is redundant with time_unit. " "Only include time_unit."
).format(unit_in_name)
elif (
unit_in_name in TIME_UNIT_ABBREV.keys()
or unit_in_name in TIME_UNIT_ABBREV.values()
):
yield (
f"Suffix '{unit_in_name}' doesn't match time_unit "
f"'{time_unit.name}'. "
"Suffix '{}' doesn't match time_unit. "
"Confirm the unit is correct and only include time_unit."
)
).format(unit_in_name)
elif memory_unit is not None:
elif hasattr(metric, "memory_unit"):
if (
unit_in_name == MEMORY_UNIT_ABBREV.get(memory_unit.name)
or unit_in_name == memory_unit.name
unit_in_name == MEMORY_UNIT_ABBREV.get(metric.memory_unit.name)
or unit_in_name == metric.memory_unit.name
):
yield (
f"Suffix '{unit_in_name}' is redundant with memory_unit "
f"'{memory_unit.name}'. "
"Suffix '{}' is redundant with memory_unit. "
"Only include memory_unit."
)
).format(unit_in_name)
elif (
unit_in_name in MEMORY_UNIT_ABBREV.keys()
or unit_in_name in MEMORY_UNIT_ABBREV.values()
):
yield (
f"Suffix '{unit_in_name}' doesn't match memory_unit "
f"{memory_unit.name}'. "
"Suffix '{}' doesn't match memory_unit. "
"Confirm the unit is correct and only include memory_unit."
)
).format(unit_in_name)
elif unit is not None:
if unit_in_name == unit:
elif hasattr(metric, "unit"):
if unit_in_name == metric.unit:
yield (
f"Suffix '{unit_in_name}' is redundant with unit param "
f"'{unit}'. "
"Only include unit."
)
"Suffix '{}' is redundant with unit param. " "Only include unit."
).format(unit_in_name)
def check_category_generic(
category_name: str, metrics: Iterable[metrics.Metric]
) -> LintGenerator:
def check_category_generic(category_name, metrics):
"""
The category name is too generic.
"""
GENERIC_CATEGORIES = ["metrics", "events"]
if category_name in GENERIC_CATEGORIES:
yield (
f"Category '{category_name}' is too generic. "
f"Don't use {_english_list(GENERIC_CATEGORIES)} for category names"
)
yield "Category '{}' is too generic.".format(category_name)
def check_bug_number(
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
def check_bug_number(metric, parser_config={}):
number_bugs = [str(bug) for bug in metric.bugs if isinstance(bug, int)]
if len(number_bugs):
yield (
f"For bugs {', '.join(number_bugs)}: "
"Bug numbers are deprecated and should be changed to full URLs. "
"For example, use 'http://bugzilla.mozilla.org/12345' instead of '12345'."
)
"For bugs {}: "
"Bug numbers are deprecated and should be changed to full URLs."
).format(", ".join(number_bugs))
def check_valid_in_baseline(
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
def check_valid_in_baseline(metric, parser_config={}):
allow_reserved = parser_config.get("allow_reserved", False)
if not allow_reserved and "baseline" in metric.send_in_pings:
yield (
"The baseline ping is Glean-internal. "
"Remove 'baseline' from the send_in_pings array."
"User metrics should go into the 'metrics' ping or custom pings."
)
def check_misspelled_pings(
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
def check_misspelled_pings(metric, parser_config={}):
builtin_pings = ["metrics", "events"]
for ping in metric.send_in_pings:
for builtin in pings.RESERVED_PING_NAMES:
for builtin in builtin_pings:
distance = _hamming_distance(ping, builtin)
if distance == 1:
yield f"Ping '{ping}' seems misspelled. Did you mean '{builtin}'?"
yield ("Ping '{}' seems misspelled. Did you mean '{}'?").format(
ping, builtin
)
def check_user_lifetime_expiration(
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
if metric.lifetime == metrics.Lifetime.user and metric.expires != "never":
yield (
"Metrics with 'user' lifetime cannot have an expiration date. "
"They live as long as the user profile does. "
"Set expires to 'never'."
)
# The checks that operate on an entire category of metrics:
# {NAME: (function, is_error)}
CATEGORY_CHECKS: Dict[
str, Tuple[Callable[[str, Iterable[metrics.Metric]], LintGenerator], CheckType]
] = {
"COMMON_PREFIX": (check_common_prefix, CheckType.error),
"CATEGORY_GENERIC": (check_category_generic, CheckType.error),
CATEGORY_CHECKS = {
"COMMON_PREFIX": check_common_prefix,
"CATEGORY_GENERIC": check_category_generic,
}
# The checks that operate on individual metrics:
# {NAME: (function, is_error)}
INDIVIDUAL_CHECKS: Dict[
str, Tuple[Callable[[metrics.Metric, dict], LintGenerator], CheckType]
] = {
"UNIT_IN_NAME": (check_unit_in_name, CheckType.error),
"BUG_NUMBER": (check_bug_number, CheckType.error),
"BASELINE_PING": (check_valid_in_baseline, CheckType.error),
"MISSPELLED_PING": (check_misspelled_pings, CheckType.error),
"USER_LIFETIME_EXPIRATION": (check_user_lifetime_expiration, CheckType.warning),
INDIVIDUAL_CHECKS = {
"UNIT_IN_NAME": check_unit_in_name,
"BUG_NUMBER": check_bug_number,
"BASELINE_PING": check_valid_in_baseline,
"MISSPELLED_PING": check_misspelled_pings,
}
class GlinterNit:
def __init__(self, check_name: str, name: str, msg: str, check_type: CheckType):
self.check_name = check_name
self.name = name
self.msg = msg
self.check_type = check_type
def format(self):
return (
f"{self.check_type.name.upper()}: {self.check_name}: "
f"{self.name}: {self.msg}"
)
def lint_metrics(
objs: metrics.ObjectTree, parser_config: Dict[str, Any] = {}, file=sys.stderr
) -> List[GlinterNit]:
def lint_metrics(objs, parser_config={}, file=sys.stderr):
"""
Performs glinter checks on a set of metrics objects.
@ -282,40 +193,26 @@ def lint_metrics(
:param file: The stream to write errors to.
:returns: List of nits.
"""
nits: List[GlinterNit] = []
for (category_name, category) in sorted(list(objs.items())):
nits = []
for (category_name, metrics) in sorted(list(objs.items())):
if category_name == "pings":
continue
# Make sure the category has only Metrics, not Pings
category_metrics = dict(
(name, metric)
for (name, metric) in category.items()
if isinstance(metric, metrics.Metric)
)
for (cat_check_name, (cat_check_func, check_type)) in CATEGORY_CHECKS.items():
if any(
cat_check_name in metric.no_lint for metric in category_metrics.values()
):
for (check_name, check_func) in CATEGORY_CHECKS.items():
if any(check_name in metric.no_lint for metric in metrics.values()):
continue
nits.extend(
GlinterNit(cat_check_name, category_name, msg, check_type)
for msg in cat_check_func(category_name, category_metrics.values())
(check_name, category_name, msg)
for msg in check_func(category_name, metrics.values())
)
for (metric_name, metric) in sorted(list(category_metrics.items())):
for (check_name, (check_func, check_type)) in INDIVIDUAL_CHECKS.items():
for (metric_name, metric) in sorted(list(metrics.items())):
for (check_name, check_func) in INDIVIDUAL_CHECKS.items():
new_nits = list(check_func(metric, parser_config))
if len(new_nits):
if check_name not in metric.no_lint:
nits.extend(
GlinterNit(
check_name,
".".join([metric.category, metric.name]),
msg,
check_type,
)
(check_name, ".".join([metric.category, metric.name]), msg)
for msg in new_nits
)
else:
@ -324,21 +221,20 @@ def lint_metrics(
and check_name in metric.no_lint
):
nits.append(
GlinterNit(
(
"SUPERFLUOUS_NO_LINT",
".".join([metric.category, metric.name]),
(
f"Superfluous no_lint entry '{check_name}'. "
"Superfluous no_lint entry '{}'. "
"Please remove it."
),
CheckType.warning,
).format(check_name),
)
)
if len(nits):
print("Sorry, Glean found some glinter nits:", file=file)
for nit in nits:
print(nit.format(), file=file)
for check_name, name, msg in nits:
print("{}: {}: {}".format(check_name, name, msg), file=file)
print("", file=file)
print("Please fix the above nits to continue.", file=file)
print(
@ -352,7 +248,7 @@ def lint_metrics(
return nits
def lint_yaml_files(input_filepaths: Iterable[Path], file=sys.stderr) -> List:
def lint_yaml_files(input_filepaths, file=sys.stderr):
"""
Performs glinter YAML lint on a set of files.
@ -361,36 +257,32 @@ def lint_yaml_files(input_filepaths: Iterable[Path], file=sys.stderr) -> List:
:returns: List of nits.
"""
# Generic type since the actual type comes from yamllint, which we don't
# control.
nits: List = []
nits = []
for path in input_filepaths:
# yamllint needs both the file content and the path.
file_content = None
with path.open("r", encoding="utf-8") as fd:
with path.open("r") as fd:
file_content = fd.read()
problems = linter.run(file_content, YamlLintConfig("extends: default"), path)
nits.extend((path, p) for p in problems)
nits.extend(p for p in problems)
if len(nits):
print("Sorry, Glean found some glinter nits:", file=file)
for (path, p) in nits:
print(f"{path} ({p.line}:{p.column}) - {p.message}")
for p in nits:
print("{} ({}:{}) - {}".format(path, p.line, p.column, p.message))
print("", file=file)
print("Please fix the above nits to continue.", file=file)
return [x[1] for x in nits]
return nits
def glinter(
input_filepaths: Iterable[Path], parser_config: Dict[str, Any] = {}, file=sys.stderr
) -> int:
def glinter(input_filepaths, parser_config={}, file=sys.stderr):
"""
Commandline helper for glinter.
:param input_filepaths: List of Path objects to load metrics from.
:param parser_config: Parser configuration object, passed to
:param parser_config: Parser configuration objects, passed to
`parser.parse_objects`.
:param file: The stream to write the errors to.
:return: Non-zero if there were any glinter errors.
@ -403,9 +295,8 @@ def glinter(
if util.report_validation_errors(objs):
return 1
nits = lint_metrics(objs.value, parser_config=parser_config, file=file)
if any(nit.check_type == CheckType.error for nit in nits):
if lint_metrics(objs.value, parser_config=parser_config, file=file):
return 1
if len(nits) == 0:
print("✨ Your metrics are Glean! ✨", file=file)
print("✨ Your metrics are Glean! ✨", file=file)
return 0
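A hypothetical command-line style invocation of this helper; the path is an example:

import sys
from pathlib import Path
from glean_parser import lint

sys.exit(lint.glinter([Path("metrics.yaml")]))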

View file

@ -8,17 +8,13 @@
Outputter to generate Markdown documentation for metrics.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from . import metrics
from . import pings
from . import util
from collections import defaultdict
def extra_info(obj: Union[metrics.Metric, pings.Ping]) -> List[Tuple[str, str]]:
def extra_info(obj):
"""
Returns a list of string to string tuples with extra information for the type
(e.g. extra keys for events) or an empty list if nothing is available.
@ -33,13 +29,10 @@ def extra_info(obj: Union[metrics.Metric, pings.Ping]) -> List[Tuple[str, str]]:
for label in obj.ordered_labels:
extra_info.append((label, None))
if isinstance(obj, metrics.Jwe):
extra_info.append(("decrypted_name", obj.decrypted_name))
return extra_info
def ping_desc(ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}) -> str:
def ping_desc(ping_name, custom_pings_cache={}):
"""
Return a text description of the ping. If a custom_pings_cache
is available, look there for the descriptions of non-reserved ping names.
@ -59,21 +52,23 @@ def ping_desc(ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}) ->
return desc
def metrics_docs(obj_name: str) -> str:
def metrics_docs(obj_name):
"""
Return a link to the documentation entry for the Glean SDK metric of the
requested type.
"""
base_url = "https://mozilla.github.io/glean/book/user/metrics/{}.html"
# We need to fixup labeled stuff, as types are singular and docs refer
# to them as plural.
fixedup_name = obj_name
if obj_name.startswith("labeled_"):
fixedup_name += "s"
return f"https://mozilla.github.io/glean/book/user/metrics/{fixedup_name}.html"
return base_url.format(fixedup_name)
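Following the fix-up logic above, labeled types resolve to the plural documentation page, for example:

assert metrics_docs("string").endswith("/metrics/string.html")
assert metrics_docs("labeled_counter").endswith("/metrics/labeled_counters.html")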
def ping_docs(ping_name: str) -> str:
def ping_docs(ping_name):
"""
Return a link to the documentation entry for the requested Glean SDK
built-in ping.
@ -81,19 +76,17 @@ def ping_docs(ping_name: str) -> str:
if ping_name not in pings.RESERVED_PING_NAMES:
return ""
return f"https://mozilla.github.io/glean/book/user/pings/{ping_name}.html"
return "https://mozilla.github.io/glean/book/user/pings/{}.html".format(ping_name)
def if_empty(ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}) -> bool:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].send_if_empty
else:
return False
def if_empty(ping_name, custom_pings_cache={}):
return (
custom_pings_cache.get(ping_name)
and custom_pings_cache[ping_name].send_if_empty
)
def ping_reasons(
ping_name: str, custom_pings_cache: Dict[str, pings.Ping]
) -> Dict[str, str]:
def ping_reasons(ping_name, custom_pings_cache):
"""
Returns the reasons dictionary for the ping.
"""
@ -105,45 +98,7 @@ def ping_reasons(
return {}
def ping_data_reviews(
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> Optional[List[str]]:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].data_reviews
else:
return None
def ping_bugs(
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> Optional[List[str]]:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].bugs
else:
return None
def ping_include_client_id(
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> bool:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].include_client_id
else:
return False
def data_sensitivity_numbers(
data_sensitivity: Optional[List[metrics.DataSensitivity]],
) -> str:
if data_sensitivity is None:
return "unknown"
else:
return ", ".join(str(x.value) for x in data_sensitivity)
def output_markdown(
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
def output_markdown(objs, output_dir, options={}):
"""
Given a tree of objects, output Markdown docs to `output_dir`.
@ -151,7 +106,7 @@ def output_markdown(
contents and a section for each ping metrics are collected for.
:param objects: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional key:
- `project_title`: The project's title.
@ -172,23 +127,21 @@ def output_markdown(
# }
#
# This also builds a dictionary of custom pings, if available.
custom_pings_cache: Dict[str, pings.Ping] = defaultdict()
metrics_by_pings: Dict[str, List[metrics.Metric]] = defaultdict(list)
custom_pings_cache = defaultdict()
metrics_by_pings = defaultdict(list)
for category_key, category_val in objs.items():
for obj in category_val.values():
# Filter out custom pings. We will need them for extracting
# the description
if isinstance(obj, pings.Ping):
custom_pings_cache[obj.name] = obj
# Pings that have `send_if_empty` set to true
# might not have any metrics. They need to at least have an
# empty array of metrics to show up on the template.
if obj.send_if_empty and not metrics_by_pings[obj.name]:
if obj.send_if_empty:
metrics_by_pings[obj.name] = []
# If this is an internal Glean metric, and we don't
# want docs for it.
if isinstance(obj, metrics.Metric) and not obj.is_internal_metric():
elif obj.is_internal_metric():
# This is an internal Glean metric, and we don't
# want docs for it.
continue
else:
# If we get here, obj is definitely a metric we want
# docs for.
for ping_name in obj.send_in_pings:
@ -212,13 +165,6 @@ def output_markdown(
("ping_send_if_empty", lambda x: if_empty(x, custom_pings_cache)),
("ping_docs", ping_docs),
("ping_reasons", lambda x: ping_reasons(x, custom_pings_cache)),
("ping_data_reviews", lambda x: ping_data_reviews(x, custom_pings_cache)),
("ping_bugs", lambda x: ping_bugs(x, custom_pings_cache)),
(
"ping_include_client_id",
lambda x: ping_include_client_id(x, custom_pings_cache),
),
("data_sensitivity_numbers", data_sensitivity_numbers),
),
)

View file

@ -9,55 +9,50 @@ Classes for each of the high-level metric types.
"""
import enum
from typing import Any, Dict, List, Optional, Type, Union # noqa
import sys
from . import pings
from . import util
# Important: if the values are ever changing here, make sure
# to also fix mozilla/glean. Otherwise language bindings may
# break there.
# Import a backport of PEP487 to support __init_subclass__
if sys.version_info < (3, 6):
import pep487
base_object = pep487.PEP487Object
else:
base_object = object
class Lifetime(enum.Enum):
ping = 0
application = 1
user = 2
user = 1
application = 2
class DataSensitivity(enum.Enum):
technical = 1
interaction = 2
web_activity = 3
highly_sensitive = 4
class Metric:
typename: str = "ERROR"
glean_internal_metric_cat: str = "glean.internal.metrics"
metric_types: Dict[str, Any] = {}
default_store_names: List[str] = ["metrics"]
class Metric(base_object):
glean_internal_metric_cat = "glean.internal.metrics"
metric_types = {}
default_store_names = ["metrics"]
def __init__(
self,
type: str,
category: str,
name: str,
bugs: List[str],
description: str,
notification_emails: List[str],
expires: str,
data_reviews: Optional[List[str]] = None,
version: int = 0,
disabled: bool = False,
lifetime: str = "ping",
send_in_pings: Optional[List[str]] = None,
unit: str = "",
gecko_datapoint: str = "",
no_lint: Optional[List[str]] = None,
data_sensitivity: Optional[List[str]] = None,
_config: Optional[Dict[str, Any]] = None,
_validated: bool = False,
type,
category,
name,
bugs,
description,
notification_emails,
expires,
data_reviews=None,
version=0,
disabled=False,
lifetime="ping",
send_in_pings=None,
unit="",
gecko_datapoint="",
no_lint=None,
_config=None,
_validated=False,
):
# Avoid cyclical import
from . import parser
@ -83,10 +78,6 @@ class Metric:
if no_lint is None:
no_lint = []
self.no_lint = no_lint
if data_sensitivity is not None:
self.data_sensitivity = [
getattr(DataSensitivity, x) for x in data_sensitivity
]
# _validated indicates whether this metric has already been jsonschema
# validated (but not any of the Python-level validation).
@ -94,7 +85,7 @@ class Metric:
data = {
"$schema": parser.METRICS_ID,
self.category: {self.name: self.serialize()},
} # type: Dict[str, util.JSONType]
}
for error in parser.validate(data):
raise ValueError(error)
@ -110,14 +101,7 @@ class Metric:
super().__init_subclass__(**kwargs)
@classmethod
def make_metric(
cls,
category: str,
name: str,
metric_info: Dict[str, util.JSONType],
config: Dict[str, Any] = {},
validated: bool = False,
):
def make_metric(cls, category, name, metric_info, config={}, validated=False):
"""
Given a metric_info dictionary from metrics.yaml, return a metric
instance.
@ -132,17 +116,15 @@ class Metric:
:return: A new Metric instance.
"""
metric_type = metric_info["type"]
if not isinstance(metric_type, str):
raise TypeError(f"Unknown metric type {metric_type}")
return cls.metric_types[metric_type](
category=category,
name=name,
_validated=validated,
_config=config,
**metric_info,
**metric_info
)
def serialize(self) -> Dict[str, util.JSONType]:
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""
@ -153,13 +135,11 @@ class Metric:
d[key] = d[key].name
if isinstance(val, set):
d[key] = sorted(list(val))
if isinstance(val, list) and len(val) and isinstance(val[0], enum.Enum):
d[key] = [x.name for x in val]
del d["name"]
del d["category"]
return d
def identifier(self) -> str:
def identifier(self):
"""
Create an identifier unique for this metric.
Generally, category.name; however, Glean internal
@ -169,17 +149,17 @@ class Metric:
return self.name
return ".".join((self.category, self.name))
def is_disabled(self) -> bool:
def is_disabled(self):
return self.disabled or self.is_expired()
def is_expired(self) -> bool:
def is_expired(self):
return util.is_expired(self.expires)
@staticmethod
def validate_expires(expires) -> None:
def validate_expires(expires):
return util.validate_expires(expires)
def is_internal_metric(self) -> bool:
def is_internal_metric(self):
return self.category in (Metric.glean_internal_metric_cat, "")
@ -226,10 +206,6 @@ class Timespan(TimeBase):
class TimingDistribution(TimeBase):
typename = "timing_distribution"
def __init__(self, *args, **kwargs):
self.time_unit = getattr(TimeUnit, kwargs.pop("time_unit", "nanosecond"))
Metric.__init__(self, *args, **kwargs)
class MemoryUnit(enum.Enum):
byte = 0
@ -273,7 +249,7 @@ class Event(Metric):
default_store_names = ["events"]
_generate_enums = [("allowed_extra_keys", "Keys")]
_generate_enums = [("extra_keys", "Keys")]
def __init__(self, *args, **kwargs):
self.extra_keys = kwargs.pop("extra_keys", {})
@ -286,7 +262,7 @@ class Event(Metric):
return sorted(list(self.extra_keys.keys()))
@staticmethod
def validate_extra_keys(extra_keys: Dict[str, str], config: Dict[str, Any]) -> None:
def validate_extra_keys(extra_keys, config):
if not config.get("allow_reserved") and any(
k.startswith("glean.") for k in extra_keys.keys()
):
@ -300,14 +276,6 @@ class Uuid(Metric):
typename = "uuid"
class Jwe(Metric):
typename = "jwe"
def __init__(self, *args, **kwargs):
self.decrypted_name = kwargs.pop("decrypted_name")
super().__init__(*args, **kwargs)
class Labeled(Metric):
labeled = True
@ -321,7 +289,7 @@ class Labeled(Metric):
self.labels = None
super().__init__(*args, **kwargs)
def serialize(self) -> Dict[str, util.JSONType]:
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""
@ -341,6 +309,3 @@ class LabeledString(Labeled, String):
class LabeledCounter(Labeled, Counter):
typename = "labeled_counter"
ObjectTree = Dict[str, Dict[str, Union[Metric, pings.Ping]]]

View file

@ -12,12 +12,11 @@ from collections import OrderedDict
import functools
from pathlib import Path
import textwrap
from typing import Any, Dict, Generator, Iterable, Optional, Tuple, Union
import jsonschema # type: ignore
from jsonschema.exceptions import ValidationError # type: ignore
import jsonschema
from jsonschema.exceptions import ValidationError
from .metrics import Metric, ObjectTree
from .metrics import Metric
from .pings import Ping, RESERVED_PING_NAMES
from . import util
@ -46,15 +45,13 @@ def _update_validator(validator):
if len(missing_properties):
missing_properties = sorted(list(missing_properties))
yield ValidationError(
f"Missing required properties: {', '.join(missing_properties)}"
"Missing required properties: {}".format(", ".join(missing_properties))
)
validator.VALIDATORS["required"] = required
def _load_file(
filepath: Path,
) -> Generator[str, None, Tuple[Dict[str, util.JSONType], Optional[str]]]:
def _load_file(filepath):
"""
Load a metrics.yaml or pings.yaml format file.
"""
@ -65,20 +62,15 @@ def _load_file(
return {}, None
if content is None:
yield util.format_error(filepath, "", f"'{filepath}' file can not be empty.")
return {}, None
if not isinstance(content, dict):
yield util.format_error(
filepath, "", "'{}' file can not be empty.".format(filepath)
)
return {}, None
if content == {}:
return {}, None
schema_key = content.get("$schema")
if not isinstance(schema_key, str):
raise TypeError(f"Invalid schema key {schema_key}")
filetype = FILE_TYPES.get(schema_key)
filetype = FILE_TYPES.get(content.get("$schema"))
for error in validate(content, filepath):
content = {}
@ -88,7 +80,7 @@ def _load_file(
@functools.lru_cache(maxsize=1)
def _load_schemas() -> Dict[str, Tuple[Any, Any]]:
def _load_schemas():
"""
Load all of the known schemas from disk, and put them in a map based on the
schema's $id.
@ -105,9 +97,7 @@ def _load_schemas() -> Dict[str, Tuple[Any, Any]]:
return schemas
def _get_schema(
schema_id: str, filepath: Union[str, Path] = "<input>"
) -> Tuple[Any, Any]:
def _get_schema(schema_id, filepath="<input>"):
"""
Get the schema for the given schema $id.
"""
@ -115,25 +105,22 @@ def _get_schema(
if schema_id not in schemas:
raise ValueError(
util.format_error(
filepath, "", f"$schema key must be one of {', '.join(schemas.keys())}",
filepath,
"",
"$schema key must be one of {}".format(", ".join(schemas.keys())),
)
)
return schemas[schema_id]
def _get_schema_for_content(
content: Dict[str, util.JSONType], filepath: Union[str, Path]
) -> Tuple[Any, Any]:
def _get_schema_for_content(content, filepath):
"""
Get the appropriate schema for the given JSON content.
"""
schema_url = content.get("$schema")
if not isinstance(schema_url, str):
raise TypeError("Invalid $schema type {schema_url}")
return _get_schema(schema_url, filepath)
return _get_schema(content.get("$schema"), filepath)
def get_parameter_doc(key: str) -> str:
def get_parameter_doc(key):
"""
Returns documentation about a specific metric parameter.
"""
@ -141,7 +128,7 @@ def get_parameter_doc(key: str) -> str:
return schema["definitions"]["metric"]["properties"][key]["description"]
def get_ping_parameter_doc(key: str) -> str:
def get_ping_parameter_doc(key):
"""
Returns documentation about a specific ping parameter.
"""
@ -149,9 +136,7 @@ def get_ping_parameter_doc(key: str) -> str:
return schema["additionalProperties"]["properties"][key]["description"]
def validate(
content: Dict[str, util.JSONType], filepath: Union[str, Path] = "<input>"
) -> Generator[str, None, None]:
def validate(content, filepath="<input>"):
"""
Validate the given content against the appropriate schema.
"""
@ -166,13 +151,7 @@ def validate(
)
def _instantiate_metrics(
all_objects: ObjectTree,
sources: Dict[Any, Path],
content: Dict[str, util.JSONType],
filepath: Path,
config: Dict[str, Any],
) -> Generator[str, None, None]:
def _instantiate_metrics(all_objects, sources, content, filepath, config):
"""
Load a list of metrics.yaml files, convert the JSON information into Metric
objects, and merge them into a single tree.
@ -187,16 +166,12 @@ def _instantiate_metrics(
if not config.get("allow_reserved") and category_key.split(".")[0] == "glean":
yield util.format_error(
filepath,
f"For category '{category_key}'",
"For category '{}'".format(category_key),
"Categories beginning with 'glean' are reserved for "
"Glean internal use.",
)
continue
all_objects.setdefault(category_key, OrderedDict())
if not isinstance(category_val, dict):
raise TypeError(f"Invalid content for {category_key}")
for metric_key, metric_val in category_val.items():
try:
metric_obj = Metric.make_metric(
@ -204,7 +179,9 @@ def _instantiate_metrics(
)
except Exception as e:
yield util.format_error(
filepath, f"On instance {category_key}.{metric_key}", str(e),
filepath,
"On instance {}.{}".format(category_key, metric_key),
str(e),
)
metric_obj = None
else:
@ -214,7 +191,7 @@ def _instantiate_metrics(
):
yield util.format_error(
filepath,
f"On instance {category_key}.{metric_key}",
"On instance {}.{}".format(category_key, metric_key),
'Only internal metrics may specify "all-pings" '
'in "send_in_pings"',
)
@ -229,9 +206,8 @@ def _instantiate_metrics(
yield util.format_error(
filepath,
"",
(
f"Duplicate metric name '{category_key}.{metric_key}' "
f"already defined in '{already_seen}'"
("Duplicate metric name '{}.{}'" "already defined in '{}'").format(
category_key, metric_key, already_seen
),
)
else:
@ -239,13 +215,7 @@ def _instantiate_metrics(
sources[(category_key, metric_key)] = filepath
def _instantiate_pings(
all_objects: ObjectTree,
sources: Dict[Any, Path],
content: Dict[str, util.JSONType],
filepath: Path,
config: Dict[str, Any],
) -> Generator[str, None, None]:
def _instantiate_pings(all_objects, sources, content, filepath, config):
"""
Load a list of pings.yaml files, convert the JSON information into Ping
objects.
@ -257,18 +227,18 @@ def _instantiate_pings(
if ping_key in RESERVED_PING_NAMES:
yield util.format_error(
filepath,
f"For ping '{ping_key}'",
f"Ping uses a reserved name ({RESERVED_PING_NAMES})",
"For ping '{}'".format(ping_key),
"Ping uses a reserved name ({})".format(RESERVED_PING_NAMES),
)
continue
if not isinstance(ping_val, dict):
raise TypeError(f"Invalid content for ping {ping_key}")
ping_val["name"] = ping_key
try:
ping_obj = Ping(**ping_val)
except Exception as e:
yield util.format_error(filepath, f"On instance '{ping_key}'", str(e))
continue
yield util.format_error(
filepath, "On instance '{}'".format(ping_key), str(e)
)
ping_obj = None
already_seen = sources.get(ping_key)
if already_seen is not None:
@ -276,23 +246,21 @@ def _instantiate_pings(
yield util.format_error(
filepath,
"",
f"Duplicate ping name '{ping_key}' "
f"already defined in '{already_seen}'",
("Duplicate ping name '{}'" "already defined in '{}'").format(
ping_key, already_seen
),
)
else:
all_objects.setdefault("pings", {})[ping_key] = ping_obj
sources[ping_key] = filepath
def _preprocess_objects(objs: ObjectTree, config: Dict[str, Any]) -> ObjectTree:
def _preprocess_objects(objs, config):
"""
Preprocess the object tree to better set defaults.
"""
for category in objs.values():
for obj in category.values():
if not isinstance(obj, Metric):
continue
if not config.get("do_not_disable_expired", False) and hasattr(
obj, "is_disabled"
):
@ -308,9 +276,7 @@ def _preprocess_objects(objs: ObjectTree, config: Dict[str, Any]) -> ObjectTree:
@util.keep_value
def parse_objects(
filepaths: Iterable[Path], config: Dict[str, Any] = {}
) -> Generator[str, None, ObjectTree]:
def parse_objects(filepaths, config={}):
"""
Parse one or more metrics.yaml and/or pings.yaml files, returning a tree of
`metrics.Metric` and `pings.Ping` instances.
@ -332,15 +298,14 @@ def parse_objects(
files
:param config: A dictionary of options that change parsing behavior.
Supported keys are:
- `allow_reserved`: Allow values reserved for internal Glean use.
- `do_not_disable_expired`: Don't mark expired metrics as disabled.
This is useful when you want to retain the original "disabled"
value from the `metrics.yaml`, rather than having it overridden when
the metric expires.
"""
all_objects: ObjectTree = OrderedDict()
sources: Dict[Any, Path] = {}
all_objects = OrderedDict()
sources = {}
filepaths = util.ensure_list(filepaths)
for filepath in filepaths:
content, filetype = yield from _load_file(filepath)

View file

@ -8,27 +8,33 @@
Classes for managing the description of pings.
"""
from typing import Dict, List, Optional
import sys
from . import util
# Import a backport of PEP487 to support __init_subclass__
if sys.version_info < (3, 6):
import pep487
base_object = pep487.PEP487Object
else:
base_object = object
RESERVED_PING_NAMES = ["baseline", "metrics", "events", "deletion-request"]
RESERVED_PING_NAMES = ["baseline", "metrics", "events", "deletion_request"]
class Ping:
class Ping(base_object):
def __init__(
self,
name: str,
description: str,
bugs: List[str],
notification_emails: List[str],
data_reviews: Optional[List[str]] = None,
include_client_id: bool = False,
send_if_empty: bool = False,
reasons: Dict[str, str] = None,
_validated: bool = False,
name,
description,
bugs,
notification_emails,
data_reviews=None,
include_client_id=False,
send_if_empty=False,
reasons=None,
_validated=False,
):
# Avoid cyclical import
from . import parser
@ -49,24 +55,21 @@ class Ping:
# _validated indicates whether this metric has already been jsonschema
# validated (but not any of the Python-level validation).
if not _validated:
data: Dict[str, util.JSONType] = {
"$schema": parser.PINGS_ID,
self.name: self.serialize(),
}
data = {"$schema": parser.PINGS_ID, self.name: self.serialize()}
for error in parser.validate(data):
raise ValueError(error)
_generate_enums = [("reason_codes", "ReasonCodes")]
@property
def type(self) -> str:
def type(self):
return "ping"
@property
def reason_codes(self) -> List[str]:
def reason_codes(self):
return sorted(list(self.reasons.keys()))
def serialize(self) -> Dict[str, util.JSONType]:
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""

View file

@ -87,26 +87,24 @@ definitions:
metrics coming from GeckoView.
- `timespan`: Represents a time interval. Additional properties:
`time_unit`.
`time_unit`_.
- `timing_distribution`: Record the distribution of multiple
timings. Additional properties: `time_unit`.
timings. Additional properties: `time_unit`_.
- `datetime`: A date/time value. Represented as an ISO datetime in
UTC. Additional properties: `time_unit`.
UTC. Additional properties: `time_unit`_.
- `uuid`: Record a UUID v4.
- `jwe`: Record a [JWE](https://tools.ietf.org/html/rfc7516) value.
- `memory_distribution`: A histogram for recording memory usage
values. Additional properties: `memory_unit`.
values. Additional properties: `memory_unit`_.
- `custom_distribution`: A histogram with a custom range and number
of buckets. This metric type is for legacy support only and is
only allowed for metrics coming from GeckoView. Additional
properties: `range_min`, `range_max`, `bucket_count`,
`histogram_type`.
properties: `range_min`_, `range_max`_, `bucket_count`_,
`histogram_type`_.
- Additionally, labeled versions of many metric types are supported.
These support the `labels`_ parameter, allowing multiple instances
@ -129,7 +127,6 @@ definitions:
- memory_distribution
- datetime
- uuid
- jwe
- labeled_boolean
- labeled_string
- labeled_counter
@ -259,21 +256,12 @@ definitions:
time_unit:
title: Time unit
description: |
For timespans and datetimes, specifies the unit that the metric will
be stored and displayed in. If not provided, it defaults to
"millisecond". Time values are sent to the backend as integers, so
`time_unit`_ determines the maximum resolution at which timespans are
recorded. Times are always truncated, not rounded, to the nearest time
unit. For example, a measurement of 25 ns will be returned as 0 ms if
`time_unit` is `"millisecond"`.
For timing distributions, times are always recorded and sent in
nanoseconds, but `time_unit` controls the minimum and maximum values.
If not provided, it defaults to "nanosecond".
- nanosecond: 1ns <= x <= 10 minutes
- microsecond: 1μs <= x <= ~6.94 days
- millisecond: 1ms <= x <= ~19 years
Specifies the unit that the metric will be stored and displayed in. If
not provided, it defaults to milliseconds. Time values are sent to the
backend as integers, so `time_unit`_ determines the maximum resolution
at which timespans are recorded. Times are always truncated, not
rounded, to the nearest time unit. For example, a measurement of 25 ns
will be returned as 0 ms if `time_unit` is `"millisecond"`.
Valid when `type`_ is `timespan`, `timing_distribution` or `datetime`.
enum:
@ -311,9 +299,9 @@ definitions:
description: |
A list of labels for a labeled metric. If provided, the labels are
enforced at run time, and recording to an unknown label is recorded
to the special label `__other__`. If not provided, the labels
to the special label ``__other__``. If not provided, the labels
may be anything, but using too many unique labels will put some
labels in the special label `__other__`.
labels in the special label ``__other__``.
Valid with any of the labeled metric types.
anyOf:
@ -329,7 +317,6 @@ definitions:
description: |
The acceptable keys on the "extra" object sent with events. This is an
object mapping the key to an object containing metadata about the key.
A maximum of 10 extra keys is allowed.
This metadata object has the following keys:
- `description`: **Required.** A description of the key.
@ -345,7 +332,6 @@ definitions:
type: string
required:
- description
maxProperties: 10
default: {}
gecko_datapoint:
@ -415,62 +401,6 @@ definitions:
items:
type: string
decrypted_name:
title: Decrypted name
description: |
Name of the column where to persist the decrypted value
stored in the JWE after processing.
Required when `type`_ is `jwe`.
type: string
pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
data_sensitivity:
title: The level of data sensitivity
description: |
There are four data collection categories related to data sensitivity
[defined here](https://wiki.mozilla.org/Firefox/Data_Collection):
- **Category 1: Technical Data:** (`technical`) Information about the
machine or Firefox itself. Examples include OS, available memory,
crashes and errors, outcome of automated processes like updates,
safebrowsing, activation, version \#s, and buildid. This also
includes compatibility information about features and APIs used by
websites, addons, and other 3rd-party software that interact with
Firefox during usage.
- **Category 2: Interaction Data:** (`interaction`) Information about
the user's direct engagement with Firefox. Examples include how many
tabs, addons, or windows a user has open; uses of specific Firefox
features; session length, scrolls and clicks; and the status of
discrete user preferences.
- **Category 3: Web activity data:** (`web_activity`) Information
about user web browsing that could be considered sensitive. Examples
include users' specific web browsing history; general information
about their web browsing history (such as TLDs or categories of
webpages visited over time); and potentially certain types of
interaction data about specific webpages visited.
- **Category 4: Highly sensitive data:** (`highly_sensitive`)
Information that directly identifies a person, or if combined with
other data could identify a person. Examples include e-mail,
usernames, identifiers such as google ad id, apple id, fxaccount,
city or country (unless small ones are explicitly filtered out), or
certain cookies. It may be embedded within specific website content,
such as memory contents, dumps, captures of screen data, or DOM
data.
type: array
items:
enum:
- technical
- interaction
- web_activity
- highly_sensitive
type: string
minLength: 1
uniqueItems: true
required:
- type
- bugs
@ -588,13 +518,3 @@ additionalProperties:
- unit
description: |
`quantity` is missing required parameter `unit`.
-
if:
properties:
type:
const: jwe
then:
required:
- decrypted_name
description: |
`jwe` is missing required parameter `decrypted_name`.

View file

@ -10,19 +10,17 @@ Outputter to generate Swift code for metrics.
import enum
import json
from pathlib import Path
from typing import Any, Dict, Union
from . import metrics
from . import pings
from . import util
from collections import defaultdict
# An (incomplete) list of reserved keywords in Swift.
# These will be replaced in generated code by their escaped form.
SWIFT_RESERVED_NAMES = ["internal", "typealias"]
def swift_datatypes_filter(value: util.JSONType) -> str:
def swift_datatypes_filter(value):
"""
A Jinja2 filter that renders Swift literals.
@ -64,7 +62,7 @@ def swift_datatypes_filter(value: util.JSONType) -> str:
return "".join(SwiftEncoder().iterencode(value))
def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
def type_name(obj):
"""
Returns the Swift type to use for a given metric or ping object.
"""
@ -85,7 +83,7 @@ def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
return class_name(obj.type)
def class_name(obj_type: str) -> str:
def class_name(obj_type):
"""
Returns the Swift class name for a given metric or ping type.
"""
@ -96,7 +94,7 @@ def class_name(obj_type: str) -> str:
return util.Camelize(obj_type) + "MetricType"
def variable_name(var: str) -> str:
def variable_name(var):
"""
Returns a valid Swift variable name, escaping keywords if necessary.
"""
@ -106,24 +104,12 @@ def variable_name(var: str) -> str:
return var
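The escaping itself is elided by the hunk; Swift escapes reserved identifiers with backticks, so a sketch consistent with the docstring would be:

def variable_name_sketch(var):
    # Escape Swift keywords with backticks, e.g. `internal`.
    if var in SWIFT_RESERVED_NAMES:
        return "`" + var + "`"
    return var

assert variable_name_sketch("internal") == "`internal`"
assert variable_name_sketch("category") == "category"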
class Category:
"""
Data struct holding information about a metric to be used in the template.
"""
name: str
objs: Dict[str, Union[metrics.Metric, pings.Ping]]
contains_pings: bool
def output_swift(
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
def output_swift(objs, output_dir, options={}):
"""
Given a tree of objects, output Swift code to `output_dir`.
:param objects: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
- namespace: The namespace to generate metrics in
@ -140,34 +126,49 @@ def output_swift(
),
)
# The object parameters to pass to constructors.
# **CAUTION**: This list needs to be in the order the type constructor expects them.
# The `test_order_of_fields` test checks that the generated code is valid.
# **DO NOT CHANGE THE ORDER OR ADD NEW FIELDS IN THE MIDDLE**
extra_args = [
"category",
"name",
"send_in_pings",
"lifetime",
"disabled",
"time_unit",
"allowed_extra_keys",
"reason_codes",
]
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "Glean")
filename = "Metrics.swift"
filepath = output_dir / filename
categories = []
for category_key, category_val in objs.items():
contains_pings = any(
isinstance(obj, pings.Ping) for obj in category_val.values()
filename = util.Camelize(category_key) + ".swift"
filepath = output_dir / filename
custom_pings = defaultdict()
for obj in category_val.values():
if isinstance(obj, pings.Ping):
custom_pings[obj.name] = obj
has_labeled_metrics = any(
getattr(metric, "labeled", False) for metric in category_val.values()
)
cat = Category()
cat.name = category_key
cat.objs = category_val
cat.contains_pings = contains_pings
categories.append(cat)
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
categories=categories,
extra_args=util.extra_metric_args,
namespace=namespace,
glean_namespace=glean_namespace,
allow_reserved=options.get("allow_reserved", False),
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
category_name=category_key,
objs=category_val,
extra_args=extra_args,
namespace=namespace,
glean_namespace=glean_namespace,
has_labeled_metrics=has_labeled_metrics,
is_ping_type=len(custom_pings) > 0,
allow_reserved=options.get("allow_reserved", False)
)
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")

View file

@ -1,98 +0,0 @@
// -*- mode: csharp -*-
/*
* AUTOGENERATED BY glean_parser. DO NOT EDIT.
*/
{# The rendered markdown is autogenerated, but this
Jinja2 template is not. Please file bugs! #}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
{% macro obj_declaration(obj, suffix='', access='', lazy=False) %}
{{ access }} {% if lazy %} Lazy<{{ obj|type_name }}>{%- else %} {{ obj|type_name }}{% endif %} {{ obj.name|camelize }}{{ suffix }}
{%- if lazy %} = new Lazy<{{ obj|type_name }}>(() => {%- else %} ={% endif %}
new {{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
{{ arg_name|camelize }}: {{ obj[arg_name]|csharp }}{{ "," if not loop.last }}
{% endfor %}
){% if lazy %});{% else %};{% endif %}{% endmacro %}
using System;
using {{ glean_namespace }}.Private;
{# The C# metrics design requires the class name to have a 'Definition'
suffix, in order to nicely call in the metrics from the consumer code.
The user code will be interested in the Value of the lazy instance, so
that's where the real class name should be used. #}
{% set metrics_class_name = category_name|Camelize + 'Definition' %}
namespace {{ namespace }}
{
internal sealed class {{ metrics_class_name }}
{
private static readonly Lazy<{{ metrics_class_name }}>
lazyInstance = new Lazy<{{ metrics_class_name }}>(() => new {{ metrics_class_name }}());
public static {{ metrics_class_name }} {{ category_name|Camelize }} => lazyInstance.Value;
// Private constructor to disallow instantiation from external callers.
private {{ metrics_class_name }}() { }
#pragma warning disable IDE1006 // Naming Styles
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
internal enum {{ obj.name|camelize }}{{ suffix }} {
{% for key in obj|attr(name) %}
{{ key|camelize }}{{ "," if not loop.last }}
{% endfor %}
}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
{% for obj in objs.values() %}
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
private readonly Lazy<LabeledMetricType<{{ obj|type_name }}>> {{ obj.name|camelize }}Lazy = new Lazy<LabeledMetricType<{{ obj|type_name }}>>(() => new LabeledMetricType(
category = {{ obj.category|csharp }},
name = {{ obj.name|csharp }},
subMetric = {{ obj.name|camelize }}Label,
disabled = {{ obj.is_disabled()|csharp }},
lifetime = {{ obj.lifetime|csharp }},
sendInPings = {{ obj.send_in_pings|csharp }},
labels = {{ obj.labels|csharp }}
)
);
/// <summary>
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
/// </summary>
public {{ obj|type_name }} {{ obj.name|camelize }} => {{ obj.name|camelize }}Lazy.Value;
{% else %}
{# Deal with non-ping objects first. We need them to be lazy and we
want their description to stick on an accessor object. #}
{% if obj.type != 'ping' %}
{{ obj_declaration(obj, access='private readonly', suffix='Lazy', lazy=True) }}
/// <summary>
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
/// </summary>
internal {{ obj|type_name }} {{ obj.name|camelize }} => {{ obj.name|camelize }}Lazy.Value;
{% else %}
{# Finally handle pings. #}
/// <summary>
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
/// </summary>
{{ obj_declaration(obj, access='internal readonly', lazy=False) }}
{% endif %}
{% endif %}
{%- endfor %}
#pragma warning restore IDE1006 // Naming Styles
}
}

View file

@ -28,40 +28,19 @@ See the Glean SDK documentation for the [`{{ ping_name }}` ping]({{ ping_name|pi
{% if ping_name|ping_send_if_empty %}
This ping is sent if empty.
{% endif %}
{% if ping_name|ping_include_client_id %}
This ping includes the [client id](https://mozilla.github.io/glean/book/user/pings/index.html#the-client_info-section).
{% endif %}
{% if ping_name|ping_data_reviews %}
**Data reviews for this ping:**
{% for review in ping_name|ping_data_reviews %}
- <{{review}}>
{% endfor %}
{% endif %}
{% if ping_name|ping_bugs %}
**Bugs related to this ping:**
{% for bug in ping_name|ping_bugs %}
- {% if bug|int != 0 %}{{bug}}{% else %}<{{bug}}>{% endif %}
{% endfor %}
{% endif %}
{% if ping_name|ping_reasons %}
**Reasons this ping may be sent:**
Reasons this ping may be sent:
{% for (reason, desc) in ping_name|ping_reasons|dictsort %}
- `{{ reason }}`: {{ desc|indent(6, first=False) }}
- `{{ reason }}`: {{ desc|indent(6, indentfirst=False) }}
{% endfor %}
{% endif %}
{% if metrics_by_pings[ping_name] %}
The following metrics are added to the ping:
| Name | Type | Description | Data reviews | Extras | Expiration | [Data Sensitivity](https://wiki.mozilla.org/Firefox/Data_Collection) |
| Name | Type | Description | Data reviews | Extras | Expiration |
| --- | --- | --- | --- | --- | --- |
{% for metric in metrics_by_pings[ping_name] %}
| {{ metric.identifier() }} |
@ -78,7 +57,6 @@ The following metrics are added to the ping:
</ul>
{%- endif -%} |
{{- metric.expires }} |
{{- metric.data_sensitivity|data_sensitivity_numbers }} |
{% endfor %}
{% else %}
This ping contains no metrics.
@ -86,8 +64,6 @@ This ping contains no metrics.
{% endfor %}
Data categories are [defined here](https://wiki.mozilla.org/Firefox/Data_Collection).
<!-- AUTOGENERATED BY glean_parser. DO NOT EDIT. -->
{# The rendered markdown is autogenerated, but this
Jinja2 template is not. Please file bugs! #}

View file

@ -26,15 +26,14 @@ import {{ glean_namespace }}
// swiftlint:disable force_try
extension {{ namespace }} {
{% for category in categories %}
{% if category.contains_pings %}
class {{ category.name|Camelize }} {
public static let shared = {{ category.name|Camelize }}()
{% if is_ping_type %}
class {{ category_name|Camelize }} {
public static let shared = {{ category_name|Camelize }}()
private init() {
// Intentionally left private, no external user can instantiate a new global object.
}
{% for obj in category.objs.values() %}
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
@ -51,6 +50,7 @@ extension {{ namespace }} {
{% endif %}
{% endfor %}
{% endif %}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
let {{ obj.name|camelize|variable_name }} = {{obj|type_name}}(
name: {{ obj.name|swift }},
@ -61,10 +61,9 @@ extension {{ namespace }} {
{% endfor %}
}
{% else %}
enum {{ category.name|Camelize }} {
{% for obj in category.objs.values() %}
enum {{ category_name|Camelize }} {
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
@ -82,7 +81,7 @@ extension {{ namespace }} {
{% endfor %}
{% endif %}
{% endfor %}
{% for obj in category.objs.values() %}
{% for obj in objs.values() %}
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
@ -102,7 +101,5 @@ extension {{ namespace }} {
{% endif %}
{% endfor %}
}
{% endif %}
{% endfor %}
}

View file

@ -11,90 +11,64 @@ High-level interface for translating `metrics.yaml` into other formats.
from pathlib import Path
import os
import shutil
import sys
import tempfile
from typing import Any, Callable, Dict, Iterable, List
from . import lint
from . import parser
from . import csharp
from . import kotlin
from . import markdown
from . import metrics
from . import swift
from . import util
class Outputter:
"""
Class to define an output format.
Each outputter in the table has the following member values:
- output_func: the main function of the outputter, the one which
does the actual translation.
- clear_patterns: A list of glob patterns to clear in the directory before
writing new results to it.
"""
def __init__(
self,
output_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
clear_patterns: List[str] = [],
):
self.output_func = output_func
self.clear_patterns = clear_patterns
# Each outputter in the table has the following keys:
# - "output_func": the main function of the outputter, the one which
# does the actual translation.
# - "clear_output_dir": a flag to clear the target directory before moving there
# the generated files.
OUTPUTTERS = {
"csharp": Outputter(csharp.output_csharp, ["*.cs"]),
"kotlin": Outputter(kotlin.output_kotlin, ["*.kt"]),
"markdown": Outputter(markdown.output_markdown),
"swift": Outputter(swift.output_swift, ["*.swift"]),
"kotlin": {
"output_func": kotlin.output_kotlin,
"clear_output_dir": True,
"extensions": ["*.kt"],
},
"markdown": {"output_func": markdown.output_markdown, "clear_output_dir": False},
"swift": {
"output_func": swift.output_swift,
"clear_output_dir": True,
"extensions": ["*.swift"],
},
}
def translate_metrics(
input_filepaths: Iterable[Path],
output_dir: Path,
translation_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
clear_patterns: List[str] = [],
options: Dict[str, Any] = {},
parser_config: Dict[str, Any] = {},
):
def translate(input_filepaths, output_format, output_dir, options={}, parser_config={}):
"""
Translate the files in `input_filepaths` by running the metrics through a
translation function and writing the results in `output_dir`.
Translate the files in `input_filepaths` to the given `output_format` and
put the results in `output_dir`.
:param input_filepaths: list of paths to input metrics.yaml files
:param output_format: the name of the output formats
:param output_dir: the path to the output directory
:param translation_func: the function that actually performs the translation.
It is passed the following arguments:
- metrics_objects: The tree of metrics and pings as returned by
`parser.parse_objects`.
- output_dir: The path to the output directory.
- options: A dictionary of output format-specific options.
Examples of translation functions are in `kotlin.py` and `swift.py`.
:param clear_patterns: a list of glob patterns of files to clear before
generating the output files. By default, no files will be cleared (i.e.
the directory should be left alone).
:param options: dictionary of options. The available options are backend
format specific. These are passed unchanged to `translation_func`.
format specific.
:param parser_config: A dictionary of options that change parsing behavior.
See `parser.parse_metrics` for more info.
"""
input_filepaths = util.ensure_list(input_filepaths)
if lint.glinter(input_filepaths, parser_config):
return 1
if output_format not in OUTPUTTERS:
raise ValueError("Unknown output format '{}'".format(output_format))
all_objects = parser.parse_objects(input_filepaths, parser_config)
if util.report_validation_errors(all_objects):
return 1
if lint.lint_metrics(all_objects.value, parser_config):
print(
"NOTE: These warnings will become errors in a future release of Glean.",
file=sys.stderr,
)
# allow_reserved is also relevant to the translators, so copy it there
if parser_config.get("allow_reserved"):
options["allow_reserved"] = True
@ -103,16 +77,19 @@ def translate_metrics(
# real directory, for transactional integrity.
with tempfile.TemporaryDirectory() as tempdir:
tempdir_path = Path(tempdir)
translation_func(all_objects.value, tempdir_path, options)
OUTPUTTERS[output_format]["output_func"](
all_objects.value, tempdir_path, options
)
if output_dir.is_file():
output_dir.unlink()
elif output_dir.is_dir() and len(clear_patterns):
for clear_pattern in clear_patterns:
for filepath in output_dir.glob(clear_pattern):
filepath.unlink()
if len(list(output_dir.iterdir())):
print(f"Extra contents found in '{output_dir}'.")
if OUTPUTTERS[output_format]["clear_output_dir"]:
if output_dir.is_file():
output_dir.unlink()
elif output_dir.is_dir():
for extensions in OUTPUTTERS[output_format]["extensions"]:
for filepath in output_dir.glob(extensions):
filepath.unlink()
if len(list(output_dir.iterdir())):
print("Extra contents found in '{}'.".format(output_dir))
# We can't use shutil.copytree alone if the directory already exists.
# However, if it doesn't exist, make sure to create one otherwise
@ -122,37 +99,3 @@ def translate_metrics(
shutil.copy(str(filename), str(output_dir))
return 0
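In isolation, the render-into-a-temporary-directory-then-copy pattern referenced above looks roughly like this (a sketch; `render` is a hypothetical callback):

# Sketch of the write-to-tempdir-then-copy pattern; `render` is hypothetical.
import shutil
import tempfile
from pathlib import Path

def write_transactionally(render, output_dir: Path) -> None:
    with tempfile.TemporaryDirectory() as tempdir:
        tempdir_path = Path(tempdir)
        render(tempdir_path)  # if this raises, output_dir is never touched
        output_dir.mkdir(parents=True, exist_ok=True)
        for filename in tempdir_path.iterdir():
            shutil.copy(str(filename), str(output_dir))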
def translate(
input_filepaths: Iterable[Path],
output_format: str,
output_dir: Path,
options: Dict[str, Any] = {},
parser_config: Dict[str, Any] = {},
):
"""
Translate the files in `input_filepaths` to the given `output_format` and
put the results in `output_dir`.
:param input_filepaths: list of paths to input metrics.yaml files
:param output_format: the name of the output format
:param output_dir: the path to the output directory
:param options: dictionary of options. The available options are backend
format specific.
:param parser_config: A dictionary of options that change parsing behavior.
See `parser.parse_metrics` for more info.
"""
format_desc = OUTPUTTERS.get(output_format, None)
if format_desc is None:
raise ValueError(f"Unknown output format '{output_format}'")
return translate_metrics(
input_filepaths,
output_dir,
format_desc.output_func,
format_desc.clear_patterns,
options,
parser_config,
)
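Assuming the package layout above, a caller would drive translation roughly like this; the paths and the backend option shown are illustrative, not required values:

# Hypothetical invocation of translate(); paths/options are illustrative.
from pathlib import Path

from glean_parser import translate

exit_code = translate.translate(
    [Path("app/metrics.yaml")],              # input metrics.yaml files
    "kotlin",                                # must be a key of OUTPUTTERS
    Path("app/generated"),                   # output directory
    options={"namespace": "GleanMetrics"},   # backend-specific option
    parser_config={"allow_reserved": False},
)
assert exit_code in (0, 1)  # 0 on success, 1 on validation/lint failure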


@ -11,34 +11,24 @@ import json
from pathlib import Path
import sys
import textwrap
from typing import Any, Callable, Iterable, Sequence, Tuple, Union
import urllib.request
import appdirs # type: ignore
import diskcache # type: ignore
import appdirs
import diskcache
import jinja2
import jsonschema # type: ignore
from jsonschema import _utils # type: ignore
import jsonschema
from jsonschema import _utils
import yaml
if sys.version_info < (3, 7):
import iso8601 # type: ignore
import iso8601
TESTING_MODE = "pytest" in sys.modules
JSONType = Union[list, dict, str, int, float, None]
"""
The types supported by JSON.
This is only an approximation -- this should really be a recursive type.
"""
# Adapted from
# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
class _NoDatesSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
@ -106,7 +96,7 @@ else:
return yaml.dump(data, **kwargs)
def load_yaml_or_json(path: Path, ordered_dict: bool = False):
def load_yaml_or_json(path, ordered_dict=False):
"""
Load the content from either a .json or .yaml file, based on the filename
extension.
@ -123,19 +113,19 @@ def load_yaml_or_json(path: Path, ordered_dict: bool = False):
return {}
if path.suffix == ".json":
with path.open("r", encoding="utf-8") as fd:
with path.open("r") as fd:
return json.load(fd)
elif path.suffix in (".yml", ".yaml", ".yamlx"):
with path.open("r", encoding="utf-8") as fd:
with path.open("r") as fd:
if ordered_dict:
return ordered_yaml_load(fd)
else:
return yaml.load(fd, Loader=_NoDatesSafeLoader)
else:
raise ValueError(f"Unknown file extension {path.suffix}")
raise ValueError("Unknown file extension {}".format(path.suffix))
def ensure_list(value: Any) -> Sequence[Any]:
def ensure_list(value):
"""
Ensures that the value is a list. If it is anything but a list or tuple, a
list with a single element containing only value is returned.
@ -145,7 +135,7 @@ def ensure_list(value: Any) -> Sequence[Any]:
return value
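Behaviorally, the helper reduces to something like this sketch:

# Behavioral sketch of ensure_list; mirrors the docstring above.
def ensure_list_demo(value):
    if not isinstance(value, (list, tuple)):
        return [value]
    return value

assert ensure_list_demo("a") == ["a"]
assert ensure_list_demo(("a", "b")) == ("a", "b")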
def to_camel_case(input: str, capitalize_first_letter: bool) -> str:
def to_camel_case(input, capitalize_first_letter):
"""
Convert the value to camelCase.
@ -160,10 +150,10 @@ def to_camel_case(input: str, capitalize_first_letter: bool) -> str:
if not capitalize_first_letter:
tokens[0] = tokens[0].lower()
# Finally join the tokens and capitalize.
return "".join(tokens)
return ''.join(tokens)
def camelize(value: str) -> str:
def camelize(value):
"""
Convert the value to camelCase (with a lower case first letter).
@ -173,7 +163,7 @@ def camelize(value: str) -> str:
return to_camel_case(value, False)
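An illustrative reimplementation of the conversion these helpers describe (not the vendored code, which may split on more separators than underscores):

# Illustrative snake_case -> camelCase conversion; assumes non-empty input.
def to_camel_case_demo(value: str, capitalize_first_letter: bool) -> str:
    tokens = [t.capitalize() for t in value.split("_") if t]
    if not capitalize_first_letter:
        tokens[0] = tokens[0].lower()
    return "".join(tokens)

assert to_camel_case_demo("first_run_date", False) == "firstRunDate"
assert to_camel_case_demo("first_run_date", True) == "FirstRunDate"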
def Camelize(value: str) -> str:
def Camelize(value):
"""
Convert the value to CamelCase (with an upper case first letter).
@ -184,9 +174,7 @@ def Camelize(value: str) -> str:
@functools.lru_cache()
def get_jinja2_template(
template_name: str, filters: Iterable[Tuple[str, Callable]] = ()
):
def get_jinja2_template(template_name, filters=()):
"""
Get a Jinja2 template that ships with glean_parser.
@ -248,32 +236,35 @@ def get_null_resolver(schema):
return NullResolver.from_schema(schema)
def fetch_remote_url(url: str, cache: bool = True):
def fetch_remote_url(url, cache=True):
"""
Fetches the contents from an HTTP url or local file path, and optionally
caches it to disk.
"""
# Include the Python version in the cache key, since caches aren't
# sharable across Python versions.
key = (url, str(sys.version_info))
is_http = url.startswith("http")
if not is_http:
with open(url, "r", encoding="utf-8") as fd:
return fd.read()
contents = fd.read()
return contents
if cache:
cache_dir = appdirs.user_cache_dir("glean_parser", "mozilla")
with diskcache.Cache(cache_dir) as dc:
if key in dc:
return dc[key]
if url in dc:
return dc[url]
contents: str = urllib.request.urlopen(url).read()
contents = urllib.request.urlopen(url).read()
# On Python 3.5, urlopen does not handle the unicode decoding for us. This
# is ok because we control these files and we know they are in UTF-8,
# however, this wouldn't be correct in general.
if sys.version_info < (3, 6):
contents = contents.decode("utf8")
if cache:
with diskcache.Cache(cache_dir) as dc:
dc[key] = contents
dc[url] = contents
return contents
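The version-aware cache key above (the delta in this hunk) can be demonstrated standalone; the application name passed to appdirs here is a stand-in:

# Sketch of version-keyed URL caching with diskcache; names are stand-ins.
import sys
import urllib.request

import appdirs
import diskcache

def fetch_cached(url: str) -> str:
    cache_dir = appdirs.user_cache_dir("demo_fetcher", "demo")
    key = (url, str(sys.version_info))  # caches aren't shareable across Pythons
    with diskcache.Cache(cache_dir) as dc:
        if key in dc:
            return dc[key]
        contents = urllib.request.urlopen(url).read().decode("utf-8")
        dc[key] = contents
    return contents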
@ -281,7 +272,7 @@ def fetch_remote_url(url: str, cache: bool = True):
_unset = _utils.Unset()
def pprint_validation_error(error) -> str:
def pprint_validation_error(error):
"""
A version of jsonschema's ValidationError __str__ method that doesn't
include the schema fragment that failed. This makes the error messages
@ -322,7 +313,7 @@ def pprint_validation_error(error) -> str:
return "\n".join(parts)
def format_error(filepath: Union[str, Path], header: str, content: str) -> str:
def format_error(filepath, header, content):
"""
Format a jsonschema validation error.
"""
@ -331,12 +322,12 @@ def format_error(filepath: Union[str, Path], header: str, content: str) -> str:
else:
filepath = "<string>"
if header:
return f"{filepath}: {header}\n{_utils.indent(content)}"
return "{}: {}\n{}".format(filepath, header, _utils.indent(content))
else:
return f"{filepath}:\n{_utils.indent(content)}"
return "{}:\n{}".format(filepath, _utils.indent(content))
def is_expired(expires: str) -> bool:
def is_expired(expires):
"""
Parses the `expires` field in a metric or ping and returns whether
the object should be considered expired.
@ -353,13 +344,15 @@ def is_expired(expires: str) -> bool:
date = datetime.date.fromisoformat(expires)
except ValueError:
raise ValueError(
f"Invalid expiration date '{expires}'. "
"Must be of the form yyyy-mm-dd in UTC."
(
"Invalid expiration date '{}'. "
"Must be of the form yyyy-mm-dd in UTC."
).format(expires)
)
return date <= datetime.datetime.utcnow().date()
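The check above reduces to this standalone sketch on Python 3.7+, where `date.fromisoformat` is available:

# Behavioral sketch of the expiry check above (Python 3.7+).
import datetime

def is_expired_demo(expires: str) -> bool:
    try:
        date = datetime.date.fromisoformat(expires)
    except ValueError:
        raise ValueError(
            f"Invalid expiration date '{expires}'. "
            "Must be of the form yyyy-mm-dd in UTC."
        )
    return date <= datetime.datetime.utcnow().date()

assert is_expired_demo("2000-01-01") is True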
def validate_expires(expires: str) -> None:
def validate_expires(expires):
"""
Raises ValueError if `expires` is not valid.
"""
@ -381,40 +374,3 @@ def report_validation_errors(all_objects):
print("=" * 78, file=sys.stderr)
print(error, file=sys.stderr)
return found_error
# Names of metric parameters to pass to constructors.
# This includes only things that the language bindings care about, not things
# that are metadata-only or are resolved into other parameters at parse time.
# **CAUTION**: This list needs to be in the order the Swift type constructors
# expects them. (The other language bindings don't care about the order). The
# `test_order_of_fields` test checks that the generated code is valid.
# **DO NOT CHANGE THE ORDER OR ADD NEW FIELDS IN THE MIDDLE**
extra_metric_args = [
"category",
"name",
"send_in_pings",
"lifetime",
"disabled",
"time_unit",
"memory_unit",
"allowed_extra_keys",
"reason_codes",
"bucket_count",
"range_max",
"range_min",
"histogram_type",
]
# Names of ping parameters to pass to constructors.
extra_ping_args = [
"include_client_id",
"send_if_empty",
"name",
"reason_codes",
]
# Names of parameters to pass to both metric and ping constructors.
extra_args = list(set(extra_metric_args) | set(extra_ping_args))
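A sketch of how a code generator might consume these lists while preserving the documented ordering; the `metric` dict here is a hypothetical stand-in for a parsed metric object:

# Illustrative use of the ordered argument lists above; `metric` is hypothetical.
metric = {
    "category": "browser.engagement",
    "name": "session_count",
    "send_in_pings": ["metrics"],
    "lifetime": "ping",
    "disabled": False,
}

# Iterate extra_metric_args (not the dict) so the Swift constructor order is
# preserved; skip parameters this metric type does not define.
ctor_args = [(arg, metric[arg]) for arg in extra_metric_args if arg in metric]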


@ -14,7 +14,7 @@ import json
from pathlib import Path
import sys
import jsonschema # type: ignore
import jsonschema
from . import util
@ -68,7 +68,7 @@ def validate_ping(ins, outs=None, schema_url=None):
outs = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
if isinstance(ins, (str, bytes, Path)):
with open(ins, "r", encoding="utf-8") as fd:
with open(ins, "r") as fd:
return _validate_ping(fd, outs, schema_url=schema_url)
else:
return _validate_ping(ins, outs, schema_url=schema_url)


@ -2,7 +2,6 @@ black==19.10b0
coverage==4.5.2
flake8==3.7.8
m2r==0.2.1
mypy==0.761
pip
pytest-runner==4.4
pytest==4.3.0

11
third_party/python/glean_parser/requirements_dev_py35.txt vendored Normal file

@ -0,0 +1,11 @@
coverage==4.5.2
flake8==3.7.8
m2r==0.2.1
pip
pytest-runner==4.4
pytest==4.3.0
Sphinx==1.8.4
twine==1.13.0
watchdog==0.9.0
wheel
yamllint==1.18.0

26
third_party/python/glean_parser/setup.py vendored Executable file → Normal file

@ -12,26 +12,33 @@ import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 6):
print("glean_parser requires at least Python 3.6", file=sys.stderr)
if sys.version_info < (3, 5):
print("glean_parser requires at least Python 3.5", file=sys.stderr)
sys.exit(1)
with open("README.rst", encoding="utf-8") as readme_file:
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst", encoding="utf-8") as history_file:
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"appdirs>=1.4",
"Click>=7",
"diskcache>=4",
"iso8601>=0.1.10; python_version<='3.6'",
"Jinja2>=2.10.1",
"appdirs>=1.4.3",
"Click>=7.0",
"diskcache>=4.0.0",
"iso8601>=0.1.12",
"Jinja2>=2.10.1,<3.0",
"jsonschema>=3.0.2",
# 'markupsafe' is required by Jinja2. From version 2.0.0 on
# py3.5 support is dropped.
"markupsafe>=1.1,<2.0.0",
"pep487==1.0.1",
"PyYAML>=3.13",
"yamllint>=1.18.0",
# 'zipp' is required by jsonschema->importlib_metadata,
# it drops py3.5 in newer versions.
"zipp>=0.5,<2.0",
]
setup_requirements = ["pytest-runner", "setuptools-scm"]
@ -48,6 +55,7 @@ setup(
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",


@ -1,176 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Usage:
python extract_data_categories.py metrics.yaml
Automatically extract the data collection categories for all the metrics in a
metrics.yaml file by consulting the linked data reviews.
This script reads a metrics.yaml file, visits all of the associated data
reviews, trying to determine the associated data categories, and inserts them
(in place) into the original metrics.yaml file.
A very simple heuristic is used: look for the question about data categories
that appears in every data review, then collect any numbers between it and the
next question. When this simple heuristic fails, comments with "!!!" are inserted in
the output as a recommendation to manually investigate and enter the data
categories.
Requirements from PyPI: BeautifulSoup4, PyYAML
"""
import dbm
import functools
import re
import sys
import time
from typing import List, Set
from urllib.request import urlopen
from bs4 import BeautifulSoup
import yaml
cache = dbm.open("bugzilla-cache.db", "c")
QUESTION = "what collection type of data do the requested measurements fall under?"
CATEGORY_MAP = {
1: "technical",
2: "interaction",
3: "web_activity",
4: "highly_sensitive",
}
def fetch_url(url: str) -> str:
"""
Fetch a web page containing a data review, caching it to avoid
over-fetching.
"""
content = cache.get(url)
if content is not None:
return content
print(f"Fetching {url}")
content = urlopen(url).read()
cache[url] = content
time.sleep(0.5)
return content
@functools.lru_cache(1000)
def parse_data_review(html: str) -> Set[int]:
"""
Parse a single data review.
"""
soup = BeautifulSoup(html, features="html.parser")
text = soup.get_text()
lines = iter(text.splitlines())
for line in lines:
if QUESTION in line.strip():
break
categories: Set[int] = set()
for line in lines:
if "?" in line:
break
categories.update(int(x) for x in re.findall("[0-9]+", line))
return categories
def categories_as_strings(categories: Set[int]) -> List[str]:
"""
From a set of numeric categories, return the strings used in a metrics.yaml
file. This may contain strings representing errors.
"""
if len(categories):
return [
CATEGORY_MAP.get(x, f"!!!UNKNOWN CATEGORY {x}")
for x in sorted(list(categories))
]
else:
return ["!!! NO DATA CATEGORIES FOUND"]
def update_lines(
lines: List[str],
category_name: str,
metric_name: str,
data_sensitivity_values: List[str],
) -> List[str]:
"""
Update the lines of a YAML file in place to include the data_sensitivity
for the given metric, returning the lines of the result.
"""
output = []
lines_iter = iter(lines)
for line in lines_iter:
output.append(line)
if line.startswith(f"{category_name}:"):
break
for line in lines_iter:
output.append(line)
if line.startswith(f" {metric_name}:"):
break
for line in lines_iter:
output.append(line)
if line.startswith(f" data_reviews:"):
break
for line in lines_iter:
if not line.strip().startswith("- "):
output.append(" data_sensitivity:\n")
for data_sensitivity in data_sensitivity_values:
output.append(f" - {data_sensitivity}\n")
output.append(line)
break
else:
output.append(line)
for line in lines_iter:
output.append(line)
return output
def parse_yaml(yamlpath: str):
with open(yamlpath) as fd:
content = yaml.safe_load(fd)
with open(yamlpath) as fd:
lines = list(fd.readlines())
for category_name, category in content.items():
if category_name.startswith("$") or category_name == "no_lint":
continue
for metric_name, metric in category.items():
categories = set()
for data_review_url in metric["data_reviews"]:
html = fetch_url(data_review_url)
categories.update(parse_data_review(html))
lines = update_lines(
lines, category_name, metric_name, categories_as_strings(categories)
)
with open(yamlpath, "w") as fd:
for line in lines:
fd.write(line)
if __name__ == "__main__":
parse_yaml(sys.argv[-1])


@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: importlib_metadata
Version: 1.7.0
Version: 1.5.0
Summary: Read metadata from Python packages
Home-page: http://importlib-metadata.readthedocs.io/
Author: Barry Warsaw


@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: importlib-metadata
Version: 1.7.0
Version: 1.5.0
Summary: Read metadata from Python packages
Home-page: http://importlib-metadata.readthedocs.io/
Author: Barry Warsaw


@ -11,7 +11,6 @@ rst.linker
[testing]
packaging
pep517
[testing:python_version < "3.9"]
importlib_resources>=1.3
[testing:python_version < "3.7"]
importlib_resources


@ -28,8 +28,6 @@ from ._compat import (
MetaPathFinder,
email_message_from_string,
PyPy_repr,
unique_ordered,
str,
)
from importlib import import_module
from itertools import starmap
@ -55,15 +53,6 @@ __all__ = [
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
tmpl = "No package metadata was found for {self.name}"
return tmpl.format(**locals())
@property
def name(self):
name, = self.args
return name
class EntryPoint(
PyPy_repr,
@ -106,16 +95,6 @@ class EntryPoint(
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
@ -208,7 +187,7 @@ class Distribution:
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(iter(dists), None)
dist = next(dists, None)
if dist is not None:
return dist
else:
@ -251,17 +230,6 @@ class Distribution:
)
return filter(None, declared)
@classmethod
def _local(cls, root='.'):
from pep517 import build, meta
system = build.compat_system(root)
builder = functools.partial(
meta.build,
source_dir=root,
system=system,
)
return PathDistribution(zipp.Path(meta.build_as_zip(builder)))
@property
def metadata(self):
"""Return the parsed metadata for this Distribution.
@ -439,8 +407,8 @@ class FastPath:
"""
def __init__(self, root):
self.root = str(root)
self.base = os.path.basename(self.root).lower()
self.root = root
self.base = os.path.basename(root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
@ -457,8 +425,8 @@ class FastPath:
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
return unique_ordered(
child.split(posixpath.sep, 1)[0]
return (
posixpath.split(child)[0]
for child in names
)


@ -1,4 +1,4 @@
from __future__ import absolute_import, unicode_literals
from __future__ import absolute_import
import io
import abc
@ -9,27 +9,21 @@ import email
if sys.version_info > (3,): # pragma: nocover
import builtins
from configparser import ConfigParser
import contextlib
from contextlib import suppress
FileNotFoundError = builtins.FileNotFoundError
IsADirectoryError = builtins.IsADirectoryError
NotADirectoryError = builtins.NotADirectoryError
PermissionError = builtins.PermissionError
map = builtins.map
from itertools import filterfalse
else: # pragma: nocover
from backports.configparser import ConfigParser
from itertools import imap as map # type: ignore
from itertools import ifilterfalse as filterfalse
import contextlib2 as contextlib
from contextlib2 import suppress # noqa
FileNotFoundError = IOError, OSError
IsADirectoryError = IOError, OSError
NotADirectoryError = IOError, OSError
PermissionError = IOError, OSError
str = type('')
suppress = contextlib.suppress
if sys.version_info > (3, 5): # pragma: nocover
import pathlib
else: # pragma: nocover
@ -135,18 +129,3 @@ class PyPy_repr:
if affected: # pragma: nocover
__repr__ = __compat_repr__
del affected
# from itertools recipes
def unique_everseen(iterable): # pragma: nocover
"List unique elements, preserving order. Remember all elements ever seen."
seen = set()
seen_add = seen.add
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
unique_ordered = (
unique_everseen if sys.version_info < (3, 7) else dict.fromkeys)


@ -2,46 +2,6 @@
importlib_metadata NEWS
=========================
v1.7.0
======
* ``PackageNotFoundError`` now has a custom ``__str__``
mentioning "package metadata" being missing to help
guide users to the cause when the package is installed
but no metadata is present. Closes #124.
v1.6.1
======
* Added ``Distribution._local()`` as a provisional
demonstration of how to load metadata for a local
package. Implicitly requires that
`pep517 <https://pypi.org/project/pep517>`_ is
installed. Ref #42.
* Ensure inputs to FastPath are Unicode. Closes #121.
* Tests now rely on ``importlib.resources.files`` (and
backport) instead of the older ``path`` function.
* Support any iterable from ``find_distributions``.
Closes #122.
v1.6.0
======
* Added ``module`` and ``attr`` attributes to ``EntryPoint``
v1.5.2
======
* Fix redundant entries from ``FastPath.zip_children``.
Closes #117.
v1.5.1
======
* Improve reliability and consistency of compatibility
imports for contextlib and pathlib when running tests.
Closes #116.
v1.5.0
======


@ -70,9 +70,7 @@ Entry points
The ``entry_points()`` function returns a dictionary of all entry points,
keyed by group. Entry points are represented by ``EntryPoint`` instances;
each ``EntryPoint`` has ``.name``, ``.group``, and ``.value`` attributes and
a ``.load()`` method to resolve the value. There are also ``.module``,
``.attr``, and ``.extras`` attributes for getting the components of the
``.value`` attribute::
a ``.load()`` method to resolve the value::
>>> eps = entry_points()
>>> list(eps)
@ -81,12 +79,6 @@ a ``.load()`` method to resolve the value. There are also ``.module``,
>>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0]
>>> wheel
EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts')
>>> wheel.module
'wheel.cli'
>>> wheel.attr
'main'
>>> wheel.extras
[]
>>> main = wheel.load()
>>> main
<function main at 0x103528488>
@ -95,7 +87,7 @@ The ``group`` and ``name`` are arbitrary values defined by the package author
and usually a client will wish to resolve all entry points for a particular
group. Read `the setuptools docs
<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
for more information on entry points, their definition, and usage.
for more information on entrypoints, their definition, and usage.
.. _metadata:
@ -236,7 +228,7 @@ method::
"""
The ``DistributionFinder.Context`` object provides ``.path`` and ``.name``
properties indicating the path to search and name to match and may
properties indicating the path to search and names to match and may
supply other relevant context.
What this means in practice is that to support finding distribution package



Binary file not shown.

Binary file not shown.


@ -1,232 +0,0 @@
from __future__ import unicode_literals
import os
import sys
import shutil
import tempfile
import textwrap
import test.support
from .._compat import pathlib, contextlib
__metaclass__ = type
@contextlib.contextmanager
def tempdir():
tmpdir = tempfile.mkdtemp()
try:
yield pathlib.Path(tmpdir)
finally:
shutil.rmtree(tmpdir)
@contextlib.contextmanager
def save_cwd():
orig = os.getcwd()
try:
yield
finally:
os.chdir(orig)
@contextlib.contextmanager
def tempdir_as_cwd():
with tempdir() as tmp:
with save_cwd():
os.chdir(str(tmp))
yield tmp
@contextlib.contextmanager
def install_finder(finder):
sys.meta_path.append(finder)
try:
yield
finally:
sys.meta_path.remove(finder)
class Fixtures:
def setUp(self):
self.fixtures = contextlib.ExitStack()
self.addCleanup(self.fixtures.close)
class SiteDir(Fixtures):
def setUp(self):
super(SiteDir, self).setUp()
self.site_dir = self.fixtures.enter_context(tempdir())
class OnSysPath(Fixtures):
@staticmethod
@contextlib.contextmanager
def add_sys_path(dir):
sys.path[:0] = [str(dir)]
try:
yield
finally:
sys.path.remove(str(dir))
def setUp(self):
super(OnSysPath, self).setUp()
self.fixtures.enter_context(self.add_sys_path(self.site_dir))
class DistInfoPkg(OnSysPath, SiteDir):
files = {
"distinfo_pkg-1.0.0.dist-info": {
"METADATA": """
Name: distinfo-pkg
Author: Steven Ma
Version: 1.0.0
Requires-Dist: wheel >= 1.0
Requires-Dist: pytest; extra == 'test'
""",
"RECORD": "mod.py,sha256=abc,20\n",
"entry_points.txt": """
[entries]
main = mod:main
ns:sub = mod:main
"""
},
"mod.py": """
def main():
print("hello world")
""",
}
def setUp(self):
super(DistInfoPkg, self).setUp()
build_files(DistInfoPkg.files, self.site_dir)
class DistInfoPkgOffPath(SiteDir):
def setUp(self):
super(DistInfoPkgOffPath, self).setUp()
build_files(DistInfoPkg.files, self.site_dir)
class EggInfoPkg(OnSysPath, SiteDir):
files = {
"egginfo_pkg.egg-info": {
"PKG-INFO": """
Name: egginfo-pkg
Author: Steven Ma
License: Unknown
Version: 1.0.0
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries
""",
"SOURCES.txt": """
mod.py
egginfo_pkg.egg-info/top_level.txt
""",
"entry_points.txt": """
[entries]
main = mod:main
""",
"requires.txt": """
wheel >= 1.0; python_version >= "2.7"
[test]
pytest
""",
"top_level.txt": "mod\n"
},
"mod.py": """
def main():
print("hello world")
""",
}
def setUp(self):
super(EggInfoPkg, self).setUp()
build_files(EggInfoPkg.files, prefix=self.site_dir)
class EggInfoFile(OnSysPath, SiteDir):
files = {
"egginfo_file.egg-info": """
Metadata-Version: 1.0
Name: egginfo_file
Version: 0.1
Summary: An example package
Home-page: www.example.com
Author: Eric Haffa-Vee
Author-email: eric@example.coms
License: UNKNOWN
Description: UNKNOWN
Platform: UNKNOWN
""",
}
def setUp(self):
super(EggInfoFile, self).setUp()
build_files(EggInfoFile.files, prefix=self.site_dir)
class LocalPackage:
files = {
"setup.py": """
import setuptools
setuptools.setup(name="local-pkg", version="2.0.1")
""",
}
def setUp(self):
self.fixtures = contextlib.ExitStack()
self.addCleanup(self.fixtures.close)
self.fixtures.enter_context(tempdir_as_cwd())
build_files(self.files)
def build_files(file_defs, prefix=pathlib.Path()):
"""Build a set of files/directories, as described by the
file_defs dictionary. Each key/value pair in the dictionary is
interpreted as a filename/contents pair. If the contents value is a
dictionary, a directory is created, and the dictionary interpreted
as the files within it, recursively.
For example:
{"README.txt": "A README file",
"foo": {
"__init__.py": "",
"bar": {
"__init__.py": "",
},
"baz.py": "# Some code",
}
}
"""
for name, contents in file_defs.items():
full_name = prefix / name
if isinstance(contents, dict):
full_name.mkdir()
build_files(contents, prefix=full_name)
else:
if isinstance(contents, bytes):
with full_name.open('wb') as f:
f.write(contents)
else:
with full_name.open('w') as f:
f.write(DALS(contents))
class FileBuilder:
def unicode_filename(self):
return test.support.FS_NONASCII or \
self.skip("File system does not support non-ascii.")
def DALS(str):
"Dedent and left-strip"
return textwrap.dedent(str).lstrip()
class NullFinder:
def find_module(self, name):
pass


@ -1,176 +0,0 @@
import re
import textwrap
import unittest
from . import fixtures
from .. import (
Distribution, PackageNotFoundError, __version__, distribution,
entry_points, files, metadata, requires, version,
)
try:
from collections.abc import Iterator
except ImportError:
from collections import Iterator # noqa: F401
try:
from builtins import str as text
except ImportError:
from __builtin__ import unicode as text
class APITests(
fixtures.EggInfoPkg,
fixtures.DistInfoPkg,
fixtures.EggInfoFile,
unittest.TestCase):
version_pattern = r'\d+\.\d+(\.\d)?'
def test_retrieves_version_of_self(self):
pkg_version = version('egginfo-pkg')
assert isinstance(pkg_version, text)
assert re.match(self.version_pattern, pkg_version)
def test_retrieves_version_of_distinfo_pkg(self):
pkg_version = version('distinfo-pkg')
assert isinstance(pkg_version, text)
assert re.match(self.version_pattern, pkg_version)
def test_for_name_does_not_exist(self):
with self.assertRaises(PackageNotFoundError):
distribution('does-not-exist')
def test_for_top_level(self):
self.assertEqual(
distribution('egginfo-pkg').read_text('top_level.txt').strip(),
'mod')
def test_read_text(self):
top_level = [
path for path in files('egginfo-pkg')
if path.name == 'top_level.txt'
][0]
self.assertEqual(top_level.read_text(), 'mod\n')
def test_entry_points(self):
entries = dict(entry_points()['entries'])
ep = entries['main']
self.assertEqual(ep.value, 'mod:main')
self.assertEqual(ep.extras, [])
def test_metadata_for_this_package(self):
md = metadata('egginfo-pkg')
assert md['author'] == 'Steven Ma'
assert md['LICENSE'] == 'Unknown'
assert md['Name'] == 'egginfo-pkg'
classifiers = md.get_all('Classifier')
assert 'Topic :: Software Development :: Libraries' in classifiers
def test_importlib_metadata_version(self):
assert re.match(self.version_pattern, __version__)
@staticmethod
def _test_files(files):
root = files[0].root
for file in files:
assert file.root == root
assert not file.hash or file.hash.value
assert not file.hash or file.hash.mode == 'sha256'
assert not file.size or file.size >= 0
assert file.locate().exists()
assert isinstance(file.read_binary(), bytes)
if file.name.endswith('.py'):
file.read_text()
def test_file_hash_repr(self):
try:
assertRegex = self.assertRegex
except AttributeError:
# Python 2
assertRegex = self.assertRegexpMatches
util = [
p for p in files('distinfo-pkg')
if p.name == 'mod.py'
][0]
assertRegex(
repr(util.hash),
'<FileHash mode: sha256 value: .*>')
def test_files_dist_info(self):
self._test_files(files('distinfo-pkg'))
def test_files_egg_info(self):
self._test_files(files('egginfo-pkg'))
def test_version_egg_info_file(self):
self.assertEqual(version('egginfo-file'), '0.1')
def test_requires_egg_info_file(self):
requirements = requires('egginfo-file')
self.assertIsNone(requirements)
def test_requires_egg_info(self):
deps = requires('egginfo-pkg')
assert len(deps) == 2
assert any(
dep == 'wheel >= 1.0; python_version >= "2.7"'
for dep in deps
)
def test_requires_dist_info(self):
deps = requires('distinfo-pkg')
assert len(deps) == 2
assert all(deps)
assert 'wheel >= 1.0' in deps
assert "pytest; extra == 'test'" in deps
def test_more_complex_deps_requires_text(self):
requires = textwrap.dedent("""
dep1
dep2
[:python_version < "3"]
dep3
[extra1]
dep4
[extra2:python_version < "3"]
dep5
""")
deps = sorted(Distribution._deps_from_requires_text(requires))
expected = [
'dep1',
'dep2',
'dep3; python_version < "3"',
'dep4; extra == "extra1"',
'dep5; (python_version < "3") and extra == "extra2"',
]
# It's important that the environment marker expression be
# wrapped in parentheses to avoid the following 'and' binding more
# tightly than some other part of the environment expression.
assert deps == expected
class OffSysPathTests(fixtures.DistInfoPkgOffPath, unittest.TestCase):
def test_find_distributions_specified_path(self):
dists = Distribution.discover(path=[str(self.site_dir)])
assert any(
dist.metadata['Name'] == 'distinfo-pkg'
for dist in dists
)
def test_distribution_at_pathlib(self):
"""Demonstrate how to load metadata direct from a directory.
"""
dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info'
dist = Distribution.at(dist_info_path)
assert dist.version == '1.0.0'
def test_distribution_at_str(self):
dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info'
dist = Distribution.at(str(dist_info_path))
assert dist.version == '1.0.0'


@ -1,54 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals
import unittest
import packaging.requirements
import packaging.version
from . import fixtures
from .. import (
Distribution,
_compat,
version,
)
class IntegrationTests(fixtures.DistInfoPkg, unittest.TestCase):
def test_package_spec_installed(self):
"""
Illustrate the recommended procedure to determine if
a specified version of a package is installed.
"""
def is_installed(package_spec):
req = packaging.requirements.Requirement(package_spec)
return version(req.name) in req.specifier
assert is_installed('distinfo-pkg==1.0')
assert is_installed('distinfo-pkg>=1.0,<2.0')
assert not is_installed('distinfo-pkg<1.0')
class FinderTests(fixtures.Fixtures, unittest.TestCase):
def test_finder_without_module(self):
class ModuleFreeFinder(fixtures.NullFinder):
"""
A finder without an __module__ attribute
"""
def __getattribute__(self, name):
if name == '__module__':
raise AttributeError(name)
return super().__getattribute__(name)
self.fixtures.enter_context(
fixtures.install_finder(ModuleFreeFinder()))
_compat.disable_stdlib_finder()
class LocalProjectTests(fixtures.LocalPackage, unittest.TestCase):
def test_find_local(self):
dist = Distribution._local()
assert dist.metadata['Name'] == 'local-pkg'
assert dist.version == '2.0.1'


@ -1,285 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import json
import pickle
import textwrap
import unittest
import importlib
import importlib_metadata
import pyfakefs.fake_filesystem_unittest as ffs
from . import fixtures
from .. import (
Distribution, EntryPoint, MetadataPathFinder,
PackageNotFoundError, distributions,
entry_points, metadata, version,
)
try:
from builtins import str as text
except ImportError:
from __builtin__ import unicode as text
class BasicTests(fixtures.DistInfoPkg, unittest.TestCase):
version_pattern = r'\d+\.\d+(\.\d)?'
def test_retrieves_version_of_self(self):
dist = Distribution.from_name('distinfo-pkg')
assert isinstance(dist.version, text)
assert re.match(self.version_pattern, dist.version)
def test_for_name_does_not_exist(self):
with self.assertRaises(PackageNotFoundError):
Distribution.from_name('does-not-exist')
def test_package_not_found_mentions_metadata(self):
"""
When a package is not found, that could indicate that the
package is not installed or that it is installed without
metadata. Ensure the exception mentions metadata to help
guide users toward the cause. See #124.
"""
with self.assertRaises(PackageNotFoundError) as ctx:
Distribution.from_name('does-not-exist')
assert "metadata" in str(ctx.exception)
def test_new_style_classes(self):
self.assertIsInstance(Distribution, type)
self.assertIsInstance(MetadataPathFinder, type)
class ImportTests(fixtures.DistInfoPkg, unittest.TestCase):
def test_import_nonexistent_module(self):
# Ensure that the MetadataPathFinder does not crash an import of a
# non-existent module.
with self.assertRaises(ImportError):
importlib.import_module('does_not_exist')
def test_resolve(self):
entries = dict(entry_points()['entries'])
ep = entries['main']
self.assertEqual(ep.load().__name__, "main")
def test_entrypoint_with_colon_in_name(self):
entries = dict(entry_points()['entries'])
ep = entries['ns:sub']
self.assertEqual(ep.value, 'mod:main')
def test_resolve_without_attr(self):
ep = EntryPoint(
name='ep',
value='importlib_metadata',
group='grp',
)
assert ep.load() is importlib_metadata
class NameNormalizationTests(
fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
@staticmethod
def pkg_with_dashes(site_dir):
"""
Create minimal metadata for a package with dashes
in the name (and thus underscores in the filename).
"""
metadata_dir = site_dir / 'my_pkg.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w') as strm:
strm.write('Version: 1.0\n')
return 'my-pkg'
def test_dashes_in_dist_name_found_as_underscores(self):
"""
For a package with a dash in the name, the dist-info metadata
uses underscores in the name. Ensure the metadata loads.
"""
pkg_name = self.pkg_with_dashes(self.site_dir)
assert version(pkg_name) == '1.0'
@staticmethod
def pkg_with_mixed_case(site_dir):
"""
Create minimal metadata for a package with mixed case
in the name.
"""
metadata_dir = site_dir / 'CherryPy.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w') as strm:
strm.write('Version: 1.0\n')
return 'CherryPy'
def test_dist_name_found_as_any_case(self):
"""
Ensure the metadata loads when queried with any case.
"""
pkg_name = self.pkg_with_mixed_case(self.site_dir)
assert version(pkg_name) == '1.0'
assert version(pkg_name.lower()) == '1.0'
assert version(pkg_name.upper()) == '1.0'
class NonASCIITests(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
@staticmethod
def pkg_with_non_ascii_description(site_dir):
"""
Create minimal metadata for a package with non-ASCII in
the description.
"""
metadata_dir = site_dir / 'portend.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as fp:
fp.write('Description: pôrˈtend\n')
return 'portend'
@staticmethod
def pkg_with_non_ascii_description_egg_info(site_dir):
"""
Create minimal metadata for an egg-info package with
non-ASCII in the description.
"""
metadata_dir = site_dir / 'portend.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as fp:
fp.write(textwrap.dedent("""
Name: portend
pôrˈtend
""").lstrip())
return 'portend'
def test_metadata_loads(self):
pkg_name = self.pkg_with_non_ascii_description(self.site_dir)
meta = metadata(pkg_name)
assert meta['Description'] == 'pôrˈtend'
def test_metadata_loads_egg_info(self):
pkg_name = self.pkg_with_non_ascii_description_egg_info(self.site_dir)
meta = metadata(pkg_name)
assert meta.get_payload() == 'pôrˈtend\n'
class DiscoveryTests(fixtures.EggInfoPkg,
fixtures.DistInfoPkg,
unittest.TestCase):
def test_package_discovery(self):
dists = list(distributions())
assert all(
isinstance(dist, Distribution)
for dist in dists
)
assert any(
dist.metadata['Name'] == 'egginfo-pkg'
for dist in dists
)
assert any(
dist.metadata['Name'] == 'distinfo-pkg'
for dist in dists
)
def test_invalid_usage(self):
with self.assertRaises(ValueError):
list(distributions(context='something', name='else'))
class DirectoryTest(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
def test_egg_info(self):
# make an `EGG-INFO` directory that's unrelated
self.site_dir.joinpath('EGG-INFO').mkdir()
# used to crash with `IsADirectoryError`
with self.assertRaises(PackageNotFoundError):
version('unknown-package')
def test_egg(self):
egg = self.site_dir.joinpath('foo-3.6.egg')
egg.mkdir()
with self.add_sys_path(egg):
with self.assertRaises(PackageNotFoundError):
version('foo')
class MissingSysPath(fixtures.OnSysPath, unittest.TestCase):
site_dir = '/does-not-exist'
def test_discovery(self):
"""
Discovering distributions should succeed even if
there is an invalid path on sys.path.
"""
importlib_metadata.distributions()
class InaccessibleSysPath(fixtures.OnSysPath, ffs.TestCase):
site_dir = '/access-denied'
def setUp(self):
super(InaccessibleSysPath, self).setUp()
self.setUpPyfakefs()
self.fs.create_dir(self.site_dir, perm_bits=000)
def test_discovery(self):
"""
Discovering distributions should succeed even if
there is an invalid path on sys.path.
"""
list(importlib_metadata.distributions())
class TestEntryPoints(unittest.TestCase):
def __init__(self, *args):
super(TestEntryPoints, self).__init__(*args)
self.ep = importlib_metadata.EntryPoint('name', 'value', 'group')
def test_entry_point_pickleable(self):
revived = pickle.loads(pickle.dumps(self.ep))
assert revived == self.ep
def test_immutable(self):
"""EntryPoints should be immutable"""
with self.assertRaises(AttributeError):
self.ep.name = 'badactor'
def test_repr(self):
assert 'EntryPoint' in repr(self.ep)
assert 'name=' in repr(self.ep)
assert "'name'" in repr(self.ep)
def test_hashable(self):
"""EntryPoints should be hashable"""
hash(self.ep)
def test_json_dump(self):
"""
json should not expect to be able to dump an EntryPoint
"""
with self.assertRaises(Exception):
json.dumps(self.ep)
def test_module(self):
assert self.ep.module == 'value'
def test_attr(self):
assert self.ep.attr is None
class FileSystem(
fixtures.OnSysPath, fixtures.SiteDir, fixtures.FileBuilder,
unittest.TestCase):
def test_unicode_dir_on_sys_path(self):
"""
Ensure a Unicode subdirectory of a directory on sys.path
does not crash.
"""
fixtures.build_files(
{self.unicode_filename(): {}},
prefix=self.site_dir,
)
list(distributions())


@ -1,80 +0,0 @@
import sys
import unittest
from .. import (
distribution, entry_points, files, PackageNotFoundError,
version, distributions,
)
try:
from importlib import resources
getattr(resources, 'files')
getattr(resources, 'as_file')
except (ImportError, AttributeError):
import importlib_resources as resources
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
class TestZip(unittest.TestCase):
root = 'importlib_metadata.tests.data'
def _fixture_on_path(self, filename):
pkg_file = resources.files(self.root).joinpath(filename)
file = self.resources.enter_context(resources.as_file(pkg_file))
assert file.name.startswith('example-'), file.name
sys.path.insert(0, str(file))
self.resources.callback(sys.path.pop, 0)
def setUp(self):
# Find the path to the example-*.whl so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
self._fixture_on_path('example-21.12-py3-none-any.whl')
def test_zip_version(self):
self.assertEqual(version('example'), '21.12')
def test_zip_version_does_not_match(self):
with self.assertRaises(PackageNotFoundError):
version('definitely-not-installed')
def test_zip_entry_points(self):
scripts = dict(entry_points()['console_scripts'])
entry_point = scripts['example']
self.assertEqual(entry_point.value, 'example:main')
entry_point = scripts['Example']
self.assertEqual(entry_point.value, 'example:main')
def test_missing_metadata(self):
self.assertIsNone(distribution('example').read_text('does not exist'))
def test_case_insensitive(self):
self.assertEqual(version('Example'), '21.12')
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.whl/' in path, path
def test_one_distribution(self):
dists = list(distributions(path=sys.path[:1]))
assert len(dists) == 1
class TestEgg(TestZip):
def setUp(self):
# Find the path to the example-*.egg so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
self._fixture_on_path('example-21.12-py3.6.egg')
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.egg/' in path, path


@ -48,9 +48,8 @@ universal = 1
[options.extras_require]
testing =
importlib_resources>=1.3; python_version < "3.9"
importlib_resources; python_version < "3.7"
packaging
pep517
docs =
sphinx
rst.linker


@ -1 +0,0 @@
TODO


@ -1,8 +0,0 @@
language: python
python: "3.7"
node_js: "9"
install:
- pip install tox
script:
- tox
- npm install && npm test || true

19
third_party/python/jsonschema/json/LICENSE vendored

@ -1,19 +0,0 @@
Copyright (c) 2012 Julian Berman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

181
third_party/python/jsonschema/json/README.md vendored

@ -1,181 +0,0 @@
JSON Schema Test Suite [![Build Status](https://travis-ci.org/json-schema-org/JSON-Schema-Test-Suite.svg?branch=master)](https://travis-ci.org/json-schema-org/JSON-Schema-Test-Suite)
======================
This repository contains a set of JSON objects that implementors of JSON Schema
validation libraries can use to test their validators.
It is meant to be language agnostic and should require only a JSON parser.
The conversion of the JSON objects into tests within your test framework of
choice is still the job of the validator implementor.
Structure of a Test
-------------------
If you're going to use this suite, you need to know how tests are laid out. The
tests are contained in the `tests` directory at the root of this repository.
Inside that directory is a subdirectory for each draft or version of the
schema.
If you look inside the draft directory, there are a number of `.json` files,
which logically group a set of test cases together. Often the grouping is by
property under test, but not always, especially within optional test files
(discussed below).
Inside each `.json` file is a single array containing objects. It's easiest to
illustrate the structure of these with an example:
```json
{
"description": "the description of the test case",
"schema": {"the schema that should" : "be validated against"},
"tests": [
{
"description": "a specific test of a valid instance",
"data": "the instance",
"valid": true
},
{
"description": "another specific test this time, invalid",
"data": 15,
"valid": false
}
]
}
```
So a description, a schema, and some tests, where tests is an array containing
one or more objects with descriptions, data, and a boolean indicating whether
they should be valid or invalid.
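For example, a Python implementor might drive a draft's case files with the
[jsonschema](https://github.com/Julian/jsonschema) library along these lines
(a sketch, not part of the suite; the file path is illustrative):

```python
# Sketch: run one suite file against the Python `jsonschema` library.
import json

import jsonschema

with open("tests/draft7/type.json") as f:
    groups = json.load(f)

for group in groups:
    validator = jsonschema.Draft7Validator(group["schema"])
    for test in group["tests"]:
        valid = not list(validator.iter_errors(test["data"]))
        assert valid == test["valid"], test["description"]
```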
Coverage
--------
Drafts 03, 04, 06, and 07 should have full coverage, with drafts 06 and 07
being considered current and actively supported. Bug fixes will be made as
needed for draft-04 as it is still the most widely used, while draft-03
is long since deprecated.
If you see anything missing from the current supported drafts, or incorrect
on any draft still accepting bug fixes, please file an issue or submit a PR.
Who Uses the Test Suite
-----------------------
This suite is being used by:
### Clojure ###
* [jinx](https://github.com/juxt/jinx)
* [json-schema](https://github.com/tatut/json-schema)
### Coffeescript ###
* [jsck](https://github.com/pandastrike/jsck)
### C++ ###
* [Modern C++ JSON schema validator](https://github.com/pboettch/json-schema-validator)
### Dart ###
* [json_schema](https://github.com/patefacio/json_schema)
### Elixir ###
* [ex_json_schema](https://github.com/jonasschmidt/ex_json_schema)
### Erlang ###
* [jesse](https://github.com/for-GET/jesse)
### Go ###
* [gojsonschema](https://github.com/sigu-399/gojsonschema)
* [validate-json](https://github.com/cesanta/validate-json)
### Haskell ###
* [aeson-schema](https://github.com/timjb/aeson-schema)
* [hjsonschema](https://github.com/seagreen/hjsonschema)
### Java ###
* [json-schema-validator](https://github.com/daveclayton/json-schema-validator)
* [everit-org/json-schema](https://github.com/everit-org/json-schema)
* [networknt/json-schema-validator](https://github.com/networknt/json-schema-validator)
* [Justify](https://github.com/leadpony/justify)
### JavaScript ###
* [json-schema-benchmark](https://github.com/Muscula/json-schema-benchmark)
* [direct-schema](https://github.com/IreneKnapp/direct-schema)
* [is-my-json-valid](https://github.com/mafintosh/is-my-json-valid)
* [jassi](https://github.com/iclanzan/jassi)
* [JaySchema](https://github.com/natesilva/jayschema)
* [json-schema-valid](https://github.com/ericgj/json-schema-valid)
* [Jsonary](https://github.com/jsonary-js/jsonary)
* [jsonschema](https://github.com/tdegrunt/jsonschema)
* [request-validator](https://github.com/bugventure/request-validator)
* [skeemas](https://github.com/Prestaul/skeemas)
* [tv4](https://github.com/geraintluff/tv4)
* [z-schema](https://github.com/zaggino/z-schema)
* [jsen](https://github.com/bugventure/jsen)
* [ajv](https://github.com/epoberezkin/ajv)
* [djv](https://github.com/korzio/djv)
### Node.js ###
For node.js developers, the suite is also available as an
[npm](https://www.npmjs.com/package/@json-schema-org/tests) package.
Node-specific support is maintained in a [separate
repository](https://github.com/json-schema-org/json-schema-test-suite-npm)
which also welcomes your contributions!
### .NET ###
* [Newtonsoft.Json.Schema](https://github.com/JamesNK/Newtonsoft.Json.Schema)
* [Manatee.Json](https://github.com/gregsdennis/Manatee.Json)
### PHP ###
* [json-schema](https://github.com/justinrainbow/json-schema)
* [json-guard](https://github.com/thephpleague/json-guard)
### PostgreSQL ###
* [postgres-json-schema](https://github.com/gavinwahl/postgres-json-schema)
* [is_jsonb_valid](https://github.com/furstenheim/is_jsonb_valid)
### Python ###
* [jsonschema](https://github.com/Julian/jsonschema)
* [fastjsonschema](https://github.com/seznam/python-fastjsonschema)
* [hypothesis-jsonschema](https://github.com/Zac-HD/hypothesis-jsonschema)
### Ruby ###
* [json-schema](https://github.com/hoxworth/json-schema)
* [json_schemer](https://github.com/davishmcclurg/json_schemer)
### Rust ###
* [valico](https://github.com/rustless/valico)
### Swift ###
* [JSONSchema](https://github.com/kylef/JSONSchema.swift)
If you use it as well, please fork and send a pull request adding yourself to
the list :).
Contributing
------------
If you see something missing or incorrect, a pull request is most welcome!
There are some sanity checks in place for testing the test suite. You can run
them with `bin/jsonschema_suite check && npm test` or `tox && npm test`. They will be run automatically by
[Travis CI](https://travis-ci.org/) as well.


@ -1,298 +0,0 @@
#! /usr/bin/env python3
from __future__ import print_function
from pprint import pformat
import argparse
import errno
import fnmatch
import json
import os
import random
import shutil
import sys
import textwrap
import unittest
import warnings
if getattr(unittest, "skipIf", None) is None:
unittest.skipIf = lambda cond, msg : lambda fn : fn
try:
import jsonschema
except ImportError:
jsonschema = None
else:
validators = getattr(
jsonschema.validators, "validators", jsonschema.validators
)
ROOT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir).rstrip("__pycache__"),
)
SUITE_ROOT_DIR = os.path.join(ROOT_DIR, "tests")
REMOTES = {
"integer.json": {u"type": u"integer"},
"name.json": {
u"type": "string",
u"definitions": {
u"orNull": {u"anyOf": [{u"type": u"null"}, {u"$ref": u"#"}]},
},
},
"name-defs.json": {
u"type": "string",
u"$defs": {
u"orNull": {u"anyOf": [{u"type": u"null"}, {u"$ref": u"#"}]},
},
},
"subSchemas.json": {
u"integer": {u"type": u"integer"},
u"refToInteger": {u"$ref": u"#/integer"},
},
"folder/folderInteger.json": {u"type": u"integer"}
}
REMOTES_DIR = os.path.join(ROOT_DIR, "remotes")
with open(os.path.join(ROOT_DIR, "test-schema.json")) as schema:
TESTSUITE_SCHEMA = json.load(schema)
def files(paths):
for path in paths:
with open(path) as test_file:
yield json.load(test_file)
def groups(paths):
for test_file in files(paths):
for group in test_file:
yield group
def cases(paths):
for test_group in groups(paths):
for test in test_group["tests"]:
test["schema"] = test_group["schema"]
yield test
def collect(root_dir):
for root, dirs, files in os.walk(root_dir):
for filename in fnmatch.filter(files, "*.json"):
yield os.path.join(root, filename)
class SanityTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("Looking for tests in %s" % SUITE_ROOT_DIR)
cls.test_files = list(collect(SUITE_ROOT_DIR))
print("Found %s test files" % len(cls.test_files))
assert cls.test_files, "Didn't find the test files!"
def test_all_files_are_valid_json(self):
for path in self.test_files:
with open(path) as test_file:
try:
json.load(test_file)
except ValueError as error:
self.fail("%s contains invalid JSON (%s)" % (path, error))
def test_all_descriptions_have_reasonable_length(self):
for case in cases(self.test_files):
description = case["description"]
self.assertLess(
len(description),
70,
"%r is too long! (keep it to less than 70 chars)" % (
description,
),
)
def test_all_descriptions_are_unique(self):
for group in groups(self.test_files):
descriptions = set(test["description"] for test in group["tests"])
self.assertEqual(
len(descriptions),
len(group["tests"]),
"%r contains a duplicate description" % (group,)
)
@unittest.skipIf(jsonschema is None, "Validation library not present!")
def test_all_schemas_are_valid(self):
for schema in os.listdir(SUITE_ROOT_DIR):
schema_validator = validators.get(schema)
if schema_validator is not None:
test_files = collect(os.path.join(SUITE_ROOT_DIR, schema))
for case in cases(test_files):
try:
schema_validator.check_schema(case["schema"])
except jsonschema.SchemaError as error:
self.fail("%s contains an invalid schema (%s)" %
(case, error))
else:
warnings.warn("No schema validator for %s" % schema)
@unittest.skipIf(jsonschema is None, "Validation library not present!")
def test_suites_are_valid(self):
validator = jsonschema.Draft4Validator(TESTSUITE_SCHEMA)
for tests in files(self.test_files):
try:
validator.validate(tests)
except jsonschema.ValidationError as error:
self.fail(str(error))
def test_remote_schemas_are_updated(self):
files = {}
for parent, _, paths in os.walk(REMOTES_DIR):
for path in paths:
absolute_path = os.path.join(parent, path)
with open(absolute_path) as schema_file:
files[absolute_path] = json.load(schema_file)
expected = {
os.path.join(REMOTES_DIR, path): contents
for path, contents in REMOTES.items()
}
missing = set(files).symmetric_difference(expected)
changed = {
path
for path, contents in expected.items()
if path in files
and contents != files[path]
}
self.assertEqual(
files,
expected,
msg=textwrap.dedent(
"""
Remotes in the remotes/ directory do not match those in the
``jsonschema_suite`` Python script.
Unfortunately for the minute, each remote file is duplicated in
two places.""" + ("""
Only present in one location:
{}""".format("\n".join(missing)) if missing else "") + ("""
Conflicting between the two:
{}""".format("\n".join(changed)) if changed else "")
)
)
def main(arguments):
if arguments.command == "check":
suite = unittest.TestLoader().loadTestsFromTestCase(SanityTests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
elif arguments.command == "flatten":
selected_cases = [case for case in cases(collect(arguments.version))]
if arguments.randomize:
random.shuffle(selected_cases)
json.dump(selected_cases, sys.stdout, indent=4, sort_keys=True)
elif arguments.command == "remotes":
json.dump(REMOTES, sys.stdout, indent=4, sort_keys=True)
elif arguments.command == "dump_remotes":
if arguments.update:
shutil.rmtree(arguments.out_dir, ignore_errors=True)
try:
os.makedirs(arguments.out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
print("%s already exists. Aborting." % arguments.out_dir)
sys.exit(1)
raise
for url, schema in REMOTES.items():
filepath = os.path.join(arguments.out_dir, url)
try:
os.makedirs(os.path.dirname(filepath))
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(filepath, "w") as out_file:
json.dump(schema, out_file, indent=4, sort_keys=True)
out_file.write("\n")
elif arguments.command == "serve":
try:
from flask import Flask, jsonify
except ImportError:
print(textwrap.dedent("""
The Flask library is required to serve the remote schemas.
You can install it by running `pip install Flask`.
Alternatively, see the `jsonschema_suite remotes` or
`jsonschema_suite dump_remotes` commands to create static files
that can be served with your own web server.
""".strip("\n")))
sys.exit(1)
app = Flask(__name__)
@app.route("/<path:path>")
def serve_path(path):
if path in REMOTES:
return jsonify(REMOTES[path])
return "Document does not exist.", 404
app.run(port=1234)
parser = argparse.ArgumentParser(
description="JSON Schema Test Suite utilities",
)
subparsers = parser.add_subparsers(help="utility commands", dest="command")
check = subparsers.add_parser("check", help="Sanity check the test suite.")
flatten = subparsers.add_parser(
"flatten",
help="Output a flattened file containing a selected version's test cases."
)
flatten.add_argument(
"--randomize",
action="store_true",
help="Randomize the order of the outputted cases.",
)
flatten.add_argument(
"version", help="The directory containing the version to output",
)
remotes = subparsers.add_parser(
"remotes",
help="Output the expected URLs and their associated schemas for remote "
"ref tests as a JSON object."
)
dump_remotes = subparsers.add_parser(
"dump_remotes", help="Dump the remote ref schemas into a file tree",
)
dump_remotes.add_argument(
"--update",
action="store_true",
help="Update the remotes in an existing directory.",
)
dump_remotes.add_argument(
"--out-dir",
default=REMOTES_DIR,
type=os.path.abspath,
help="The output directory to create as the root of the file tree",
)
serve = subparsers.add_parser(
"serve",
help="Start a webserver to serve schemas used by remote ref tests."
)
if __name__ == "__main__":
main(parser.parse_args())

45
third_party/python/jsonschema/json/index.js vendored

@@ -1,45 +0,0 @@
'use strict';

const Ajv = require('ajv');
const jsonSchemaTest = require('json-schema-test');
const assert = require('assert');

const refs = {
  'http://localhost:1234/integer.json': require('./remotes/integer.json'),
  'http://localhost:1234/subSchemas.json': require('./remotes/subSchemas.json'),
  'http://localhost:1234/folder/folderInteger.json': require('./remotes/folder/folderInteger.json'),
  'http://localhost:1234/name.json': require('./remotes/name.json'),
  'http://localhost:1234/name-defs.json': require('./remotes/name-defs.json')
};

const SKIP = {
  4: ['optional/zeroTerminatedFloats'],
  7: [
    'format/idn-email',
    'format/idn-hostname',
    'format/iri',
    'format/iri-reference',
    'optional/content'
  ]
};

[4, 6, 7].forEach((draft) => {
  let ajv;
  if (draft == 7) {
    ajv = new Ajv({format: 'full'});
  } else {
    const schemaId = draft == 4 ? 'id' : '$id';
    ajv = new Ajv({format: 'full', meta: false, schemaId});
    ajv.addMetaSchema(require(`ajv/lib/refs/json-schema-draft-0${draft}.json`));
    ajv._opts.defaultMeta = `http://json-schema.org/draft-0${draft}/schema#`;
  }

  for (const uri in refs) ajv.addSchema(refs[uri], uri);

  jsonSchemaTest(ajv, {
    description: `Test suite draft-0${draft}`,
    suites: {tests: `./tests/draft${draft}/{**/,}*.json`},
    skip: SKIP[draft],
    cwd: __dirname,
    hideFolder: 'tests/'
  });
});
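
The loop above builds one Ajv instance per draft, swapping in the right meta-schema and id keyword before running the shared test files. A rough Python analogue of that draft-selection step, using the jsonschema library (a sketch under the assumption that jsonschema 3.x is available; it is not part of this suite):

from jsonschema import Draft4Validator, Draft6Validator, Draft7Validator

# One validator class per draft, mirroring the per-draft Ajv instances.
BY_DRAFT = {4: Draft4Validator, 6: Draft6Validator, 7: Draft7Validator}

for draft, validator_class in BY_DRAFT.items():
    validator = validator_class({"type": "integer"})
    assert validator.is_valid(3)          # integers pass under every draft
    assert not validator.is_valid("3")    # strings fail under every draft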


@@ -1,28 +0,0 @@
{
  "name": "json-schema-test-suite",
  "version": "0.1.0",
  "description": "A language agnostic test suite for the JSON Schema specifications",
  "main": "index.js",
  "scripts": {
    "test": "mocha index.js -R spec"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/json-schema-org/JSON-Schema-Test-Suite.git"
  },
  "keywords": [
    "json-schema",
    "tests"
  ],
  "author": "http://json-schema.org",
  "license": "MIT",
  "bugs": {
    "url": "https://github.com/json-schema-org/JSON-Schema-Test-Suite/issues"
  },
  "homepage": "https://github.com/json-schema-org/JSON-Schema-Test-Suite#readme",
  "devDependencies": {
    "ajv": "^6.0.0-rc.1",
    "json-schema-test": "^2.0.0",
    "mocha": "^3.2.0"
  }
}


@@ -1,3 +0,0 @@
{
    "type": "integer"
}


@@ -1,3 +0,0 @@
{
    "type": "integer"
}


@@ -1,15 +0,0 @@
{
    "$defs": {
        "orNull": {
            "anyOf": [
                {
                    "type": "null"
                },
                {
                    "$ref": "#"
                }
            ]
        }
    },
    "type": "string"
}


@@ -1,15 +0,0 @@
{
    "definitions": {
        "orNull": {
            "anyOf": [
                {
                    "type": "null"
                },
                {
                    "$ref": "#"
                }
            ]
        }
    },
    "type": "string"
}
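
These two remotes (evidently the name-defs.json and name.json files that index.js registers above) are the same schema in two spellings: draft 2019-09 renamed the definitions keyword to $defs. The recursive "$ref": "#" points back at the document root, so orNull accepts null or anything the root schema (a string) accepts. A minimal sketch with the jsonschema library (jsonschema 3.x assumed; the variable names are illustrative):

from jsonschema import Draft7Validator, RefResolver

name = {
    "definitions": {
        "orNull": {"anyOf": [{"type": "null"}, {"$ref": "#"}]}
    },
    "type": "string",
}

# Resolve refs against the document itself, then validate via the pointer.
resolver = RefResolver.from_schema(name)
or_null = Draft7Validator({"$ref": "#/definitions/orNull"}, resolver=resolver)

assert or_null.is_valid(None)    # the null branch of the anyOf
assert or_null.is_valid("Ada")   # "$ref": "#" reaches the root "type": "string"
assert not or_null.is_valid(5)   # neither null nor a string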


@@ -1,8 +0,0 @@
{
    "integer": {
        "type": "integer"
    },
    "refToInteger": {
        "$ref": "#/integer"
    }
}
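
This remote (the subSchemas.json that index.js registers above) exists to test pointer refs into a remote document: refToInteger refers to the sibling integer schema within the same file. A sketch of how a Python harness might preload it, so that "$ref" to the localhost URL resolves without the Flask server from the `serve` subcommand (jsonschema 3.x assumed):

from jsonschema import Draft7Validator, RefResolver

sub_schemas = {
    "integer": {"type": "integer"},
    "refToInteger": {"$ref": "#/integer"},
}

# Preload the remote document so no HTTP request is ever made.
store = {"http://localhost:1234/subSchemas.json": sub_schemas}
schema = {"$ref": "http://localhost:1234/subSchemas.json#/refToInteger"}
resolver = RefResolver(base_uri="", referrer=schema, store=store)
validator = Draft7Validator(schema, resolver=resolver)

assert validator.is_valid(42)        # resolves through #/integer
assert not validator.is_valid("42")  # strings are rejected there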


@@ -1,59 +0,0 @@
{
    "$schema": "http://json-schema.org/draft-04/schema#",
    "definitions": {
        "outputItem": {
            "type": "object",
            "properties": {
                "valid": {"type": "boolean"},
                "keywordLocation": {"type": "string"},
                "absoluteKeywordLocation": {
                    "type": "string",
                    "format": "uri"
                },
                "instanceLocation": {"type": "string"},
                "annotations": {
                    "type": "array",
                    "items": {"$ref": "#/definitions/outputItem"}
                },
                "errors": {
                    "type": "array",
                    "items": {"$ref": "#/definitions/outputItem"}
                }
            }
        }
    },
    "type": "array",
    "items": {
        "type": "object",
        "required": ["description", "schema", "tests"],
        "properties": {
            "description": {"type": "string"},
            "schema": {},
            "tests": {
                "type": "array",
                "items": {
                    "type": "object",
                    "required": ["description", "data", "valid"],
                    "properties": {
                        "description": {"type": "string"},
                        "data": {},
                        "valid": {"type": "boolean"},
                        "output": {
                            "type": "object",
                            "properties": {
                                "basic": {"$ref": "#/definitions/outputItem"},
                                "detailed": {"$ref": "#/definitions/outputItem"},
                                "verbose": {"$ref": "#/definitions/outputItem"}
                            },
                            "required": ["basic", "detailed", "verbose"]
                        }
                    },
                    "additionalProperties": false
                },
                "minItems": 1
            }
        },
        "additionalProperties": false,
        "minItems": 1
    }
}
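
This meta-schema pins down the shape every test file in the suite must have: an array of groups, each with a description, a schema, and a non-empty list of tests whose data is checked against an expected valid flag. A runner consuming files of that shape could be as small as the following sketch (illustrative only; Draft7Validator is one possible choice of draft, and run_case_file is a hypothetical name):

import json

from jsonschema import Draft7Validator


def run_case_file(path):
    # Each file is a list of {description, schema, tests} groups.
    with open(path) as f:
        groups = json.load(f)
    for group in groups:
        validator = Draft7Validator(group["schema"])
        for test in group["tests"]:
            result = validator.is_valid(test["data"])
            assert result == test["valid"], (
                group["description"], test["description"],
            )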


@@ -1,87 +0,0 @@
[
    {
        "description": "additionalItems as schema",
        "schema": {
            "items": [{}],
            "additionalItems": {"type": "integer"}
        },
        "tests": [
            {
                "description": "additional items match schema",
                "data": [ null, 2, 3, 4 ],
                "valid": true
            },
            {
                "description": "additional items do not match schema",
                "data": [ null, 2, 3, "foo" ],
                "valid": false
            }
        ]
    },
    {
        "description": "items is schema, no additionalItems",
        "schema": {
            "items": {},
            "additionalItems": false
        },
        "tests": [
            {
                "description": "all items match schema",
                "data": [ 1, 2, 3, 4, 5 ],
                "valid": true
            }
        ]
    },
    {
        "description": "array of items with no additionalItems",
        "schema": {
            "items": [{}, {}, {}],
            "additionalItems": false
        },
        "tests": [
            {
                "description": "fewer number of items present",
                "data": [ 1, 2 ],
                "valid": true
            },
            {
                "description": "equal number of items present",
                "data": [ 1, 2, 3 ],
                "valid": true
            },
            {
                "description": "additional items are not permitted",
                "data": [ 1, 2, 3, 4 ],
                "valid": false
            }
        ]
    },
    {
        "description": "additionalItems as false without items",
        "schema": {"additionalItems": false},
        "tests": [
            {
                "description":
                    "items defaults to empty schema so everything is valid",
                "data": [ 1, 2, 3, 4, 5 ],
                "valid": true
            },
            {
                "description": "ignores non-arrays",
                "data": {"foo" : "bar"},
                "valid": true
            }
        ]
    },
    {
        "description": "additionalItems are allowed by default",
        "schema": {"items": [{"type": "integer"}]},
        "tests": [
            {
                "description": "only the first item is validated",
                "data": [1, "foo", false],
                "valid": true
            }
        ]
    }
]
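
These cases pin down the two modes of items: with an array of schemas, additionalItems constrains everything past the declared positions; with a single schema (or no items at all), additionalItems is ignored. A sketch of the first group's behaviour using the jsonschema library (assumed available; not part of the suite):

from jsonschema import Draft7Validator

# items as an array: positions past its length fall through to additionalItems.
validator = Draft7Validator({
    "items": [{}],
    "additionalItems": {"type": "integer"},
})

assert validator.is_valid([None, 2, 3, 4])          # the extras are integers
assert not validator.is_valid([None, 2, 3, "foo"])  # "foo" violates the schema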


@@ -1,133 +0,0 @@
[
    {
        "description":
            "additionalProperties being false does not allow other properties",
        "schema": {
            "properties": {"foo": {}, "bar": {}},
            "patternProperties": { "^v": {} },
            "additionalProperties": false
        },
        "tests": [
            {
                "description": "no additional properties is valid",
                "data": {"foo": 1},
                "valid": true
            },
            {
                "description": "an additional property is invalid",
                "data": {"foo" : 1, "bar" : 2, "quux" : "boom"},
                "valid": false
            },
            {
                "description": "ignores arrays",
                "data": [1, 2, 3],
                "valid": true
            },
            {
                "description": "ignores strings",
                "data": "foobarbaz",
                "valid": true
            },
            {
                "description": "ignores other non-objects",
                "data": 12,
                "valid": true
            },
            {
                "description": "patternProperties are not additional properties",
                "data": {"foo":1, "vroom": 2},
                "valid": true
            }
        ]
    },
    {
        "description": "non-ASCII pattern with additionalProperties",
        "schema": {
            "patternProperties": {"^á": {}},
            "additionalProperties": false
        },
        "tests": [
            {
                "description": "matching the pattern is valid",
                "data": {"ármányos": 2},
                "valid": true
            },
            {
                "description": "not matching the pattern is invalid",
                "data": {"élmény": 2},
                "valid": false
            }
        ]
    },
    {
        "description":
            "additionalProperties allows a schema which should validate",
        "schema": {
            "properties": {"foo": {}, "bar": {}},
            "additionalProperties": {"type": "boolean"}
        },
        "tests": [
            {
                "description": "no additional properties is valid",
                "data": {"foo": 1},
                "valid": true
            },
            {
                "description": "an additional valid property is valid",
                "data": {"foo" : 1, "bar" : 2, "quux" : true},
                "valid": true
            },
            {
                "description": "an additional invalid property is invalid",
                "data": {"foo" : 1, "bar" : 2, "quux" : 12},
                "valid": false
            }
        ]
    },
    {
        "description":
            "additionalProperties can exist by itself",
        "schema": {
            "additionalProperties": {"type": "boolean"}
        },
        "tests": [
            {
                "description": "an additional valid property is valid",
                "data": {"foo" : true},
                "valid": true
            },
            {
                "description": "an additional invalid property is invalid",
                "data": {"foo" : 1},
                "valid": false
            }
        ]
    },
    {
        "description": "additionalProperties are allowed by default",
        "schema": {"properties": {"foo": {}, "bar": {}}},
        "tests": [
            {
                "description": "additional properties are allowed",
                "data": {"foo": 1, "bar": 2, "quux": true},
                "valid": true
            }
        ]
    },
    {
        "description": "additionalProperties should not look in applicators",
        "schema": {
            "allOf": [
                {"properties": {"foo": {}}}
            ],
            "additionalProperties": {"type": "boolean"}
        },
        "tests": [
            {
                "description": "properties defined in allOf are not allowed",
                "data": {"foo": 1, "bar": true},
                "valid": false
            }
        ]
    }
]
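
The last group is the subtle one: additionalProperties only consults the properties and patternProperties that sit next to it in the same schema object, never ones contributed by applicators such as allOf. A short sketch of that rule, mirroring the final case above (jsonschema assumed available):

from jsonschema import Draft7Validator

# "foo" is declared inside allOf, so to additionalProperties it is still
# an *additional* property and must therefore be a boolean here.
validator = Draft7Validator({
    "allOf": [{"properties": {"foo": {}}}],
    "additionalProperties": {"type": "boolean"},
})

assert not validator.is_valid({"foo": 1, "bar": True})  # foo is not a boolean
assert validator.is_valid({"foo": True, "bar": True})   # all booleans pass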

Some files were not shown because too many files have changed in this diff.