Bug 1602773 - Vendor glean_parser and its dependencies. r=ahal CLOSED TREE

Differential Revision: https://phabricator.services.mozilla.com/D64313

--HG--
extra : histedit_source : 30b5d577a6c26e79cb305a3eca4ac8cfc98df01a
This commit is contained in:
Alessio Placitelli 2020-03-02 15:34:05 +00:00
Parent 0543d20741
Commit 0ebaf39eca
188 changed files with 44837 additions and 0 deletions


@@ -6,6 +6,7 @@ mozilla.pth:python/mozrelease
mozilla.pth:python/mozterm
mozilla.pth:python/mozversioncontrol
mozilla.pth:python/l10n
mozilla.pth:third_party/python/appdirs
mozilla.pth:third_party/python/atomicwrites
mozilla.pth:third_party/python/attrs/src
python2:mozilla.pth:third_party/python/backports
@@ -16,17 +17,27 @@ mozilla.pth:third_party/python/compare-locales
mozilla.pth:third_party/python/configobj
mozilla.pth:third_party/python/cookies
mozilla.pth:third_party/python/cram
mozilla.pth:third_party/python/diskcache
mozilla.pth:third_party/python/distro
mozilla.pth:third_party/python/dlmanager
mozilla.pth:third_party/python/enum34
mozilla.pth:third_party/python/fluent
mozilla.pth:third_party/python/funcsigs
python2:mozilla.pth:third_party/python/futures
python3:mozilla.pth:third_party/python/glean_parser
mozilla.pth:third_party/python/importlib_metadata
mozilla.pth:third_party/python/iso8601
mozilla.pth:third_party/python/Jinja2
mozilla.pth:third_party/python/jsonschema
mozilla.pth:third_party/python/MarkupSafe/src
mozilla.pth:third_party/python/mohawk
mozilla.pth:third_party/python/more-itertools
mozilla.pth:third_party/python/mozilla-version
mozilla.pth:third_party/python/pathlib2
mozilla.pth:third_party/python/pathspec
mozilla.pth:third_party/python/pep487
mozilla.pth:third_party/python/gyp/pylib
mozilla.pth:third_party/python/pyrsistent
mozilla.pth:third_party/python/python-hglib
mozilla.pth:third_party/python/pluggy
mozilla.pth:third_party/python/jsmin
@@ -52,6 +63,8 @@ mozilla.pth:third_party/python/six
mozilla.pth:third_party/python/taskcluster-urls
mozilla.pth:third_party/python/voluptuous
mozilla.pth:third_party/python/json-e
mozilla.pth:third_party/python/yamllint
mozilla.pth:third_party/python/zipp
mozilla.pth:build
objdir:build
mozilla.pth:build/pymake
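
For reference, a minimal sketch of how entries in this manifest could be consumed. This assumes the semantics used by mach's virtualenv support (a `mozilla.pth:` entry names a topsrcdir-relative directory to expose on `sys.path`, and a `python2:`/`python3:` prefix gates an entry to one interpreter major version); the helper name is hypothetical::

    import os
    import sys

    def manifest_entry_to_path(line, topsrcdir, py_major=sys.version_info[0]):
        # Hypothetical helper mirroring the assumed manifest semantics above.
        entry = line.strip()
        if entry.startswith(("python2:", "python3:")):
            gate, entry = entry.split(":", 1)
            if int(gate[-1]) != py_major:
                return None  # entry is gated to the other interpreter
        kind, relpath = entry.split(":", 1)
        if kind == "mozilla.pth":
            return os.path.join(topsrcdir, relpath)
        return None  # objdir: and other entry kinds are out of scope here

    # manifest_entry_to_path("python3:mozilla.pth:third_party/python/glean_parser", "/src")
    # -> "/src/third_party/python/glean_parser" on Python 3, None on Python 2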

44 third_party/python/Jinja2/jinja2/__init__.py vendored Normal file

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
"""Jinja is a template engine written in pure Python. It provides a
non-XML syntax that supports inline expressions and an optional
sandboxed environment.
"""
from markupsafe import escape
from markupsafe import Markup
from .bccache import BytecodeCache
from .bccache import FileSystemBytecodeCache
from .bccache import MemcachedBytecodeCache
from .environment import Environment
from .environment import Template
from .exceptions import TemplateAssertionError
from .exceptions import TemplateError
from .exceptions import TemplateNotFound
from .exceptions import TemplateRuntimeError
from .exceptions import TemplatesNotFound
from .exceptions import TemplateSyntaxError
from .exceptions import UndefinedError
from .filters import contextfilter
from .filters import environmentfilter
from .filters import evalcontextfilter
from .loaders import BaseLoader
from .loaders import ChoiceLoader
from .loaders import DictLoader
from .loaders import FileSystemLoader
from .loaders import FunctionLoader
from .loaders import ModuleLoader
from .loaders import PackageLoader
from .loaders import PrefixLoader
from .runtime import ChainableUndefined
from .runtime import DebugUndefined
from .runtime import make_logging_undefined
from .runtime import StrictUndefined
from .runtime import Undefined
from .utils import clear_caches
from .utils import contextfunction
from .utils import environmentfunction
from .utils import evalcontextfunction
from .utils import is_undefined
from .utils import select_autoescape
__version__ = "2.11.1"
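
A quick smoke test of the public API re-exported above (standard Jinja2 usage)::

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({"hello.txt": "Hello {{ name }}!"}))
    print(env.get_template("hello.txt").render(name="glean"))  # Hello glean!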

132 third_party/python/Jinja2/jinja2/_compat.py vendored Normal file

@@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-
# flake8: noqa
import marshal
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, "pypy_translation_info")
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
integer_types = (int,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
return cls
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode("utf-8")
return filename
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from collections import abc
except ImportError:
import collections as abc
try:
from os import fspath
except ImportError:
try:
from pathlib import PurePath
except ImportError:
PurePath = None
def fspath(path):
if hasattr(path, "__fspath__"):
return path.__fspath__()
# Python 3.5 doesn't have __fspath__ yet, use str.
if PurePath is not None and isinstance(path, PurePath):
return str(path)
return path
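
The `with_metaclass` helper above is the piece most likely to look opaque; a small demonstration of the two-phase trick it performs (plain Python, no assumptions beyond the code above)::

    from jinja2._compat import with_metaclass

    class Registry(type):
        def __new__(mcs, name, bases, d):
            d.setdefault("identifier", name.lower())
            return type.__new__(mcs, name, bases, d)

    class Plugin(with_metaclass(Registry, object)):
        pass

    assert Plugin.identifier == "plugin"  # Registry ran on class creation
    assert type(Plugin) is Registry       # the temporary metaclass replaced itself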

6 third_party/python/Jinja2/jinja2/_identifier.py vendored Normal file

@@ -0,0 +1,6 @@
import re
# generated by scripts/generate_identifier_pattern.py
pattern = re.compile(
r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
)

159 third_party/python/Jinja2/jinja2/asyncfilters.py vendored Normal file

@@ -0,0 +1,159 @@
from functools import wraps
from . import filters
from .asyncsupport import auto_aiter
from .asyncsupport import auto_await
async def auto_to_seq(value):
seq = []
if hasattr(value, "__aiter__"):
async for item in value:
seq.append(item)
else:
for item in value:
seq.append(item)
return seq
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
yield item
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
if getattr(normal_filter, "environmentfilter", False):
def is_async(args):
return args[0].is_async
wrap_evalctx = False
else:
if not getattr(normal_filter, "evalcontextfilter", False) and not getattr(
normal_filter, "contextfilter", False
):
wrap_evalctx = True
def is_async(args):
return args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
b = is_async(args)
if wrap_evalctx:
args = args[1:]
if b:
return async_filter(*args, **kwargs)
return normal_filter(*args, **kwargs)
if wrap_evalctx:
wrapper.evalcontextfilter = True
wrapper.asyncfiltervariant = True
return wrapper
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
return decorator
@asyncfiltervariant(filters.do_first)
async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
return environment.undefined("No first item, sequence was empty.")
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
return [
filters._GroupTuple(key, await auto_to_seq(values))
for key, values in filters.groupby(
sorted(await auto_to_seq(value), key=expr), expr
)
]
@asyncfiltervariant(filters.do_join)
async def do_join(eval_ctx, value, d=u"", attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@asyncfiltervariant(filters.do_list)
async def do_list(value):
return await auto_to_seq(value)
@asyncfiltervariant(filters.do_reject)
async def do_reject(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, False)
@asyncfiltervariant(filters.do_rejectattr)
async def do_rejectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, True)
@asyncfiltervariant(filters.do_select)
async def do_select(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, False)
@asyncfiltervariant(filters.do_selectattr)
async def do_selectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, True)
@asyncfiltervariant(filters.do_map)
async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
yield await auto_await(func(item))
@asyncfiltervariant(filters.do_sum)
async def do_sum(environment, iterable, attribute=None, start=0):
rv = start
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
def func(x):
return x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@asyncfiltervariant(filters.do_slice)
async def do_slice(value, slices, fill_with=None):
return filters.do_slice(await auto_to_seq(value), slices, fill_with)
ASYNC_FILTERS = {
"first": do_first,
"groupby": do_groupby,
"join": do_join,
"list": do_list,
# we intentionally do not support do_last because that would be
# ridiculous
"reject": do_reject,
"rejectattr": do_rejectattr,
"map": do_map,
"select": do_select,
"selectattr": do_selectattr,
"sum": do_sum,
"slice": do_slice,
}
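
These dual filters only switch to their async variants when the environment runs in async mode; a short example of the standard Jinja2 2.11 async API (requires Python 3.6+)::

    import asyncio
    from jinja2 import Environment

    async def rows():
        for i in range(4):
            yield i  # an async iterable flows through auto_aiter above

    env = Environment(enable_async=True)
    tmpl = env.from_string("{{ items | select('odd') | list }}")
    result = asyncio.get_event_loop().run_until_complete(tmpl.render_async(items=rows()))
    print(result)  # [1, 3]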

264 third_party/python/Jinja2/jinja2/asyncsupport.py vendored Normal file

@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
"""The code for async support. Importing this patches Jinja on supported
Python versions.
"""
import asyncio
import inspect
from functools import update_wrapper
from markupsafe import Markup
from .environment import TemplateModule
from .runtime import LoopContext
from .utils import concat
from .utils import internalcode
from .utils import missing
async def concat_async(async_gen):
rv = []
async def collect():
async for event in async_gen:
rv.append(event)
await collect()
return concat(rv)
async def generate_async(self, *args, **kwargs):
vars = dict(*args, **kwargs)
try:
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
yield self.environment.handle_exception()
def wrap_generate_func(original_generate):
def _convert_generator(self, loop, args, kwargs):
async_gen = self.generate_async(*args, **kwargs)
try:
while 1:
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
raise RuntimeError("The environment was not created with async mode enabled.")
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
return self.environment.handle_exception()
def wrap_render_func(original_render):
def render(self, *args, **kwargs):
if not self.environment.is_async:
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
return update_wrapper(render, original_render)
def wrap_block_reference_call(original_call):
@internalcode
async def async_call(self):
rv = await concat_async(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
@internalcode
def __call__(self):
if not self._context.environment.is_async:
return original_call(self)
return async_call(self)
return update_wrapper(__call__, original_call)
def wrap_macro_invoke(original_invoke):
@internalcode
async def async_invoke(self, arguments, autoescape):
rv = await self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
@internalcode
def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
return update_wrapper(_invoke, original_invoke)
@internalcode
async def get_default_module_async(self):
if self._module is not None:
return self._module
self._module = rv = await self.make_module_async()
return rv
def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
raise RuntimeError("Template module attribute is unavailable in async mode")
return original_default_module(self)
return _get_default_module
async def make_module_async(self, vars=None, shared=False, locals=None):
context = self.new_context(vars, shared, locals)
body_stream = []
async for item in self.root_render_func(context):
body_stream.append(item)
return TemplateModule(self, context, body_stream)
def patch_template():
from . import Template
Template.generate = wrap_generate_func(Template.generate)
Template.generate_async = update_wrapper(generate_async, Template.generate_async)
Template.render_async = update_wrapper(render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
Template._get_default_module = wrap_default_module(Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
make_module_async, Template.make_module_async
)
def patch_runtime():
from .runtime import BlockReference, Macro
BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
from .filters import FILTERS
from .asyncfilters import ASYNC_FILTERS
FILTERS.update(ASYNC_FILTERS)
def patch_all():
patch_template()
patch_runtime()
patch_filters()
async def auto_await(value):
if inspect.isawaitable(value):
return await value
return value
async def auto_aiter(iterable):
if hasattr(iterable, "__aiter__"):
async for item in iterable:
yield item
return
for item in iterable:
yield item
class AsyncLoopContext(LoopContext):
_to_iterator = staticmethod(auto_aiter)
@property
async def length(self):
if self._length is not None:
return self._length
try:
self._length = len(self._iterable)
except TypeError:
iterable = [x async for x in self._iterator]
self._iterator = self._to_iterator(iterable)
self._length = len(iterable) + self.index + (self._after is not missing)
return self._length
@property
async def revindex0(self):
return await self.length - self.index
@property
async def revindex(self):
return await self.length - self.index0
async def _peek_next(self):
if self._after is not missing:
return self._after
try:
self._after = await self._iterator.__anext__()
except StopAsyncIteration:
self._after = missing
return self._after
@property
async def last(self):
return await self._peek_next() is missing
@property
async def nextitem(self):
rv = await self._peek_next()
if rv is missing:
return self._undefined("there is no next item")
return rv
def __aiter__(self):
return self
async def __anext__(self):
if self._after is not missing:
rv = self._after
self._after = missing
else:
rv = await self._iterator.__anext__()
self.index0 += 1
self._before = self._current
self._current = rv
return rv, self
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
import warnings
warnings.warn(
"This template must be recompiled with at least Jinja 2.11, or"
" it will fail in 3.0.",
DeprecationWarning,
stacklevel=2,
)
return AsyncLoopContext(iterable, undefined, recurse, depth0)
patch_all()
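
Because `patch_all()` runs at import time, the synchronous entry points keep working even for async environments; the wrappers above just drive the event loop. A sketch using the standard API::

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string("Hello {{ who }}!")

    # wrap_render_func runs the event loop on the caller's behalf.
    print(tmpl.render(who="sync caller"))

    # render_async is the native entry point.
    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(tmpl.render_async(who="async caller")))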

350 third_party/python/Jinja2/jinja2/bccache.py vendored Normal file

@@ -0,0 +1,350 @@
# -*- coding: utf-8 -*-
"""The optional bytecode cache system. This is useful if you have very
complex template situations and the compilation of all those templates
slows down your application too much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
"""
import errno
import fnmatch
import os
import stat
import sys
import tempfile
from hashlib import sha1
from os import listdir
from os import path
from ._compat import BytesIO
from ._compat import marshal_dump
from ._compat import marshal_load
from ._compat import pickle
from ._compat import text_type
from .utils import open_if_exists
bc_version = 4
# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
bc_magic = (
b"j2"
+ pickle.dumps(bc_version, 2)
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
)
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal_load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, they must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
from a bucket back to the cache. If it is unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode("utf-8"))
if filename is not None:
filename = "|" + filename
if isinstance(filename, text_type):
filename = filename.encode("utf-8")
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode("utf-8")).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
def _unsafe_dir():
raise RuntimeError(
"Cannot determine safe temp directory. You "
"need to explicitly provide one."
)
tmpdir = tempfile.gettempdir()
# On Windows the temporary directory is already user-specific unless
# explicitly forced otherwise. We can just use that.
if os.name == "nt":
return tmpdir
if not hasattr(os, "getuid"):
_unsafe_dir()
dirname = "_jinja2-cache-%d" % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), "rb")
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), "wb")
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `cachelib <https://github.com/pallets/cachelib>`_
- `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed; if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(
self,
client,
prefix="jinja2/bytecode/",
timeout=None,
ignore_memcache_errors=True,
):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
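
How a bytecode cache is wired into an environment (standard Jinja2 API; note the cache is consulted by the loader, so templates must be loaded by name)::

    import tempfile
    from jinja2 import DictLoader, Environment, FileSystemBytecodeCache

    env = Environment(
        loader=DictLoader({"page.html": "{{ title }}"}),
        bytecode_cache=FileSystemBytecodeCache(tempfile.mkdtemp(), "%s.cache"),
    )
    env.get_template("page.html")  # compiles and writes the marshalled code
    # A later process pointed at the same cache directory skips compilation.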

1843 third_party/python/Jinja2/jinja2/compiler.py vendored Normal file

(Diff not shown because of its large size.)

21 third_party/python/Jinja2/jinja2/constants.py vendored Normal file

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u"""\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
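
This word list feeds the `lipsum()` global that `defaults.DEFAULT_NAMESPACE` installs (via `utils.generate_lorem_ipsum`); for example::

    from jinja2 import Environment

    env = Environment()
    print(env.from_string("{{ lipsum(n=1, html=False) }}").render())
    # one paragraph of pseudo-latin drawn from LOREM_IPSUM_WORDS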

271 third_party/python/Jinja2/jinja2/debug.py vendored Normal file

@@ -0,0 +1,271 @@
import sys
from types import CodeType
from . import TemplateSyntaxError
from ._compat import PYPY
from .utils import internal_code
from .utils import missing
def rewrite_traceback_stack(source=None):
"""Rewrite the current exception to replace any tracebacks from
within compiled template code with tracebacks that look like they
came from the template source.
This must be called within an ``except`` block.
:param source: For ``TemplateSyntaxError``, the original source if
known.
:return: A :meth:`sys.exc_info` tuple that can be re-raised.
"""
exc_type, exc_value, tb = sys.exc_info()
if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
exc_value.translated = True
exc_value.source = source
try:
# Remove the old traceback on Python 3, otherwise the frames
# from the compiler still show up.
exc_value.with_traceback(None)
except AttributeError:
pass
# Outside of runtime, so the frame isn't executing template
# code, but it still needs to point at the template.
tb = fake_traceback(
exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
)
else:
# Skip the frame for the render function.
tb = tb.tb_next
stack = []
# Build the stack of traceback objects, replacing any in template
# code with the source file and line information.
while tb is not None:
# Skip frames decorated with @internalcode. These are internal
# calls that aren't useful in template debugging output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
template = tb.tb_frame.f_globals.get("__jinja_template__")
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
stack.append(fake_tb)
else:
stack.append(tb)
tb = tb.tb_next
tb_next = None
# Assign tb_next in reverse to avoid circular references.
for tb in reversed(stack):
tb_next = tb_set_next(tb, tb_next)
return exc_type, exc_value, tb_next
def fake_traceback(exc_value, tb, filename, lineno):
"""Produce a new traceback object that looks like it came from the
template source instead of the compiled code. The filename, line
number, and location name will point to the template, and the local
variables will be the current template context.
:param exc_value: The original exception to be re-raised to create
the new traceback.
:param tb: The original traceback to get the local variables and
code info from.
:param filename: The template filename.
:param lineno: The line number in the template source.
"""
if tb is not None:
# Replace the real locals with the context that would be
# available at that point in the template.
locals = get_template_locals(tb.tb_frame.f_locals)
locals.pop("__jinja_exception__", None)
else:
locals = {}
globals = {
"__name__": filename,
"__file__": filename,
"__jinja_exception__": exc_value,
}
# Raise an exception at the correct line number.
code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
# Build a new code object that points to the template file and
# replaces the location with a block name.
try:
location = "template"
if tb is not None:
function = tb.tb_frame.f_code.co_name
if function == "root":
location = "top-level template code"
elif function.startswith("block_"):
location = 'block "%s"' % function[6:]
# Collect arguments for the new code object. CodeType only
# accepts positional arguments, and arguments were inserted in
# new Python versions.
code_args = []
for attr in (
"argcount",
"posonlyargcount", # Python 3.8
"kwonlyargcount", # Python 3
"nlocals",
"stacksize",
"flags",
"code", # codestring
"consts", # constants
"names",
"varnames",
("filename", filename),
("name", location),
"firstlineno",
"lnotab",
"freevars",
"cellvars",
):
if isinstance(attr, tuple):
# Replace with given value.
code_args.append(attr[1])
continue
try:
# Copy original value if it exists.
code_args.append(getattr(code, "co_" + attr))
except AttributeError:
# Some arguments were added later.
continue
code = CodeType(*code_args)
except Exception:
# Some environments such as Google App Engine don't support
# modifying code objects.
pass
# Execute the new code, which is guaranteed to raise, and return
# the new traceback without this frame.
try:
exec(code, globals, locals)
except BaseException:
return sys.exc_info()[2].tb_next
def get_template_locals(real_locals):
"""Based on the runtime locals, get the context that would be
available at that point in the template.
"""
# Start with the current template context.
ctx = real_locals.get("context")
if ctx:
data = ctx.get_all().copy()
else:
data = {}
# Might be in a derived context that only sets local variables
# rather than pushing a context. Local variables follow the scheme
# l_depth_name. Find the highest-depth local that has a value for
# each name.
local_overrides = {}
for name, value in real_locals.items():
if not name.startswith("l_") or value is missing:
# Not a template variable, or no longer relevant.
continue
try:
_, depth, name = name.split("_", 2)
depth = int(depth)
except ValueError:
continue
cur_depth = local_overrides.get(name, (-1,))[0]
if cur_depth < depth:
local_overrides[name] = (depth, value)
# Modify the context with any derived context.
for name, (_, value) in local_overrides.items():
if value is missing:
data.pop(name, None)
else:
data[name] = value
return data
if sys.version_info >= (3, 7):
# tb_next is directly assignable as of Python 3.7
def tb_set_next(tb, tb_next):
tb.tb_next = tb_next
return tb
elif PYPY:
# PyPy might have special support, and won't work with ctypes.
try:
import tputil
except ImportError:
# Without tproxy support, use the original traceback.
def tb_set_next(tb, tb_next):
return tb
else:
# With tproxy support, create a proxy around the traceback that
# returns the new tb_next.
def tb_set_next(tb, tb_next):
def controller(op):
if op.opname == "__getattribute__" and op.args[0] == "tb_next":
return tb_next
return op.delegate()
return tputil.make_proxy(controller, obj=tb)
else:
# Use ctypes to assign tb_next at the C level since it's read-only
# from Python.
import ctypes
class _CTraceback(ctypes.Structure):
_fields_ = [
# Extra PyObject slots when compiled with Py_TRACE_REFS.
(
"PyObject_HEAD",
ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16),
),
# Only care about tb_next as an object, not a traceback.
("tb_next", ctypes.py_object),
]
def tb_set_next(tb, tb_next):
c_tb = _CTraceback.from_address(id(tb))
# Clear out the old tb_next.
if tb.tb_next is not None:
c_tb_next = ctypes.py_object(tb.tb_next)
c_tb.tb_next = ctypes.py_object()
ctypes.pythonapi.Py_DecRef(c_tb_next)
# Assign the new tb_next.
if tb_next is not None:
c_tb_next = ctypes.py_object(tb_next)
ctypes.pythonapi.Py_IncRef(c_tb_next)
c_tb.tb_next = c_tb_next
return tb
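
The `l_<depth>_<name>` convention that `get_template_locals` decodes can be seen directly; the dict below is a hand-built stand-in for a real template frame's locals::

    from jinja2.debug import get_template_locals
    from jinja2.utils import missing

    frame_locals = {
        "l_0_item": missing,   # value is missing: skipped
        "l_1_item": "outer",
        "l_2_item": "inner",   # highest depth wins
        "unrelated": 42,       # not a template variable: ignored
    }
    assert get_template_locals(frame_locals) == {"item": "inner"}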

44 third_party/python/Jinja2/jinja2/defaults.py vendored Normal file

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
from ._compat import range_type
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
from .utils import Cycler
from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
DEFAULT_NAMESPACE = {
"range": range_type,
"dict": dict,
"lipsum": generate_lorem_ipsum,
"cycler": Cycler,
"joiner": Joiner,
"namespace": Namespace,
}
# default policies
DEFAULT_POLICIES = {
"compiler.ascii_str": True,
"urlize.rel": "noopener",
"urlize.target": None,
"truncate.leeway": 5,
"json.dumps_function": None,
"json.dumps_kwargs": {"sort_keys": True},
"ext.i18n.trimmed": False,
}
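
All of these defaults are overridable per environment through the `Environment` constructor (standard API); e.g. switching delimiters so templates can coexist with text that uses ``{{ }}`` literally::

    from jinja2 import Environment

    env = Environment(
        block_start_string="<%", block_end_string="%>",
        variable_start_string="${", variable_end_string="}",
    )
    print(env.from_string("<% for i in range(2) %>${ i }<% endfor %>").render())  # 01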

1362 third_party/python/Jinja2/jinja2/environment.py vendored Normal file

(Diff not shown because of its large size.)

177 third_party/python/Jinja2/jinja2/exceptions.py vendored Normal file

@@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
from ._compat import imap
from ._compat import implements_to_string
from ._compat import PY2
from ._compat import text_type
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode("utf-8")
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode("utf-8", "replace")
def __unicode__(self):
return self.message or u""
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
.. versionchanged:: 2.11
If the given name is :class:`Undefined` and no message was
provided, an :exc:`UndefinedError` is raised.
"""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self, name)
if message is None:
from .runtime import Undefined
if isinstance(name, Undefined):
name._fail_with_undefined_error()
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
message = u"none of the templates given were found: " + u", ".join(
imap(text_type, parts)
)
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = "line %d" % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(" " + line.strip())
return u"\n".join(lines)
def __reduce__(self):
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""

704 third_party/python/Jinja2/jinja2/ext.py vendored Normal file

@@ -0,0 +1,704 @@
# -*- coding: utf-8 -*-
"""Extension API for adding custom tags and behavior."""
import pprint
import re
from sys import version_info
from markupsafe import Markup
from . import nodes
from ._compat import iteritems
from ._compat import string_types
from ._compat import with_metaclass
from .defaults import BLOCK_END_STRING
from .defaults import BLOCK_START_STRING
from .defaults import COMMENT_END_STRING
from .defaults import COMMENT_START_STRING
from .defaults import KEEP_TRAILING_NEWLINE
from .defaults import LINE_COMMENT_PREFIX
from .defaults import LINE_STATEMENT_PREFIX
from .defaults import LSTRIP_BLOCKS
from .defaults import NEWLINE_SEQUENCE
from .defaults import TRIM_BLOCKS
from .defaults import VARIABLE_END_STRING
from .defaults import VARIABLE_START_STRING
from .environment import Environment
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .nodes import ContextReference
from .runtime import concat
from .utils import contextfunction
from .utils import import_string
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
_ws_re = re.compile(r"\s*\n\s*")
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(mcs, name, bases, d):
rv = type.__new__(mcs, name, bases, d)
rv.identifier = rv.__module__ + "." + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses, this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(
self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(
self.attr(name, lineno=lineno),
args,
kwargs,
dyn_args,
dyn_kwargs,
lineno=lineno,
)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, even if there are no
# variables. This makes translation strings more consistent
# and predictable. This requires escaping raw ``%`` as ``%%``.
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja."""
tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False,
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(gettext=gettext, ngettext=ngettext)
def _uninstall(self, translations):
for key in "gettext", "ngettext":
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
trimmed = None
while parser.stream.current.type != "block_end":
if variables:
parser.stream.expect("comma")
# skip colon for python compatibility
if parser.stream.skip_if("colon"):
break
name = parser.stream.expect("name")
if name.value in variables:
parser.fail(
"translatable variable %r defined twice." % name.value,
name.lineno,
exc=TemplateAssertionError,
)
# expressions
if parser.stream.current.type == "assign":
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
elif trimmed is None and name.value in ("trimmed", "notrimmed"):
trimmed = name.value == "trimmed"
continue
else:
variables[name.value] = var = nodes.Name(name.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name("_trans", "load")
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name("_trans", "store"), var
)
else:
plural_expr = var
num_called_num = name.value == "num"
parser.stream.expect("block_end")
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], "load")
num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
if parser.stream.current.type != "block_end":
name = parser.stream.expect("name")
if name.value not in variables:
parser.fail(
"unknown variable %r for pluralization" % name.value,
name.lineno,
exc=TemplateAssertionError,
)
plural_expr = variables[name.value]
num_called_num = name.value == "num"
parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail("pluralize without variables", lineno)
if trimmed is None:
trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(
singular,
plural,
variables,
plural_expr,
bool(referenced),
num_called_num and have_plural,
)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string, _ws_re=_ws_re):
return _ws_re.sub(" ", string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == "data":
buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
elif parser.stream.current.type == "variable_begin":
next(parser.stream)
name = parser.stream.expect("name").value
referenced.append(name)
buf.append("%%(%s)s" % name)
parser.stream.expect("variable_end")
elif parser.stream.current.type == "block_begin":
next(parser.stream)
if parser.stream.current.test("name:endtrans"):
break
elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
parser.fail(
"a translatable section can have only one pluralize section"
)
parser.fail(
"control structures in translatable sections are not allowed"
)
elif parser.stream.eos:
parser.fail("unclosed translation block")
else:
raise RuntimeError("internal parser error")
return referenced, concat(buf)
def _make_node(
self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace("%%", "%")
if plural:
plural = plural.replace("%%", "%")
# singular only:
if plural_expr is None:
gettext = nodes.Name("gettext", "load")
node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
# singular and plural
else:
ngettext = nodes.Name("ngettext", "load")
node = nodes.Call(
ngettext,
[nodes.Const(singular), nodes.Const(plural), plural_expr],
[],
None,
None,
)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(
node,
nodes.Dict(
[
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]
),
)
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
tags = set(["do"])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(["break", "continue"])
def parse(self, parser):
token = next(parser.stream)
if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
pass
class AutoEscapeExtension(Extension):
pass
class DebugExtension(Extension):
"""A ``{% debug %}`` tag that dumps the available variables,
filters, and tests.
.. code-block:: html+jinja
<pre>{% debug %}</pre>
.. code-block:: text
{'context': {'cycler': <class 'jinja2.utils.Cycler'>,
...,
'namespace': <class 'jinja2.utils.Namespace'>},
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
.. versionadded:: 2.11.0
"""
tags = {"debug"}
def parse(self, parser):
lineno = parser.stream.expect("name:debug").lineno
context = ContextReference()
result = self.call_method("_render", [context], lineno=lineno)
return nodes.Output([result], lineno=lineno)
def _render(self, context):
result = {
"context": context.get_all(),
"filters": sorted(self.environment.filters.keys()),
"tests": sorted(self.environment.tests.keys()),
}
# Set the depth since the intent is to show the top few names.
if version_info[:2] >= (3, 4):
return pprint.pformat(result, depth=3, compact=True)
else:
return pprint.pformat(result, depth=3)
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
    This extraction function operates on the AST and is therefore unable
    to extract any comments. For comment support you have to use the
    Babel extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if (
not isinstance(node.node, nodes.Name)
or node.node.name not in gettext_functions
):
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in reversed(
self.tokens[self.offset : offset]
):
if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
       Basic support for translation comments was added. If `comment_tags`
       is set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords. For best results, make sure not to have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get("extensions", "").split(","):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
silent = getbool(options, "silent", True)
environment = Environment(
options.get("block_start_string", BLOCK_START_STRING),
options.get("block_end_string", BLOCK_END_STRING),
options.get("variable_start_string", VARIABLE_START_STRING),
options.get("variable_end_string", VARIABLE_END_STRING),
options.get("comment_start_string", COMMENT_START_STRING),
options.get("comment_end_string", COMMENT_END_STRING),
options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
getbool(options, "trim_blocks", TRIM_BLOCKS),
getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False,
)
if getbool(options, "trimmed"):
environment.policies["ext.i18n.trimmed"] = True
if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
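# --- Editor's illustrative sketch (not part of the vendored file) ---
# Drives babel_extract directly on an in-memory template instead of
# going through Babel's extraction frontend; the helper name
# `_example_babel_extract` is hypothetical and safe to delete.
def _example_babel_extract():
    from io import BytesIO

    fileobj = BytesIO(b'<p>{{ _("Hello") }}</p>')
    messages = list(babel_extract(fileobj, ("_", "gettext", "ngettext"), [], {}))
    # One message on line 1, extracted via `_`, with no comments.
    assert messages == [(1, "_", "Hello", [])]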
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
debug = DebugExtension

1382
third_party/python/Jinja2/jinja2/filters.py vendored Normal file

Diff not shown because of its size.

290
third_party/python/Jinja2/jinja2/idtracking.py vendored Normal file

@@ -0,0 +1,290 @@
from ._compat import iteritems
from .visitor import NodeVisitor
VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"
def find_symbols(nodes, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(node, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
class Symbols(object):
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level = level
self.parent = parent
self.refs = {}
self.loads = {}
self.stores = set()
def analyze_node(self, node, **kwargs):
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
ident = "l_%d_%s" % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target):
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
def find_ref(self, name):
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
raise AssertionError(
"Tried to resolve a name to a reference that "
"was unknown to the frame (%r)" % name
)
return rv
def copy(self):
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name):
self.stores.add(name)
        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name):
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name):
target = self.find_ref(name)
if target is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols):
stores = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in iteritems(stores):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self):
rv = {}
node = self
while node is not None:
for name in node.stores:
if name not in rv:
rv[name] = self.find_ref(name)
node = node.parent
return rv
def dump_param_targets(self):
rv = set()
node = self
while node is not None:
for target, (instr, _) in iteritems(self.loads):
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
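# --- Editor's illustrative sketch (not part of the vendored file) ---
# Shows what symbols_for_node reports for a tiny template: `x` is stored
# by the `set` tag and maps to a level-0 identifier as produced by
# Symbols._define_ref. The helper name `_example_symbols` is hypothetical.
def _example_symbols():
    from jinja2 import Environment

    tmpl_ast = Environment().parse("{% set x = 1 %}{{ x + y }}")
    sym = symbols_for_node(tmpl_ast)
    assert "x" in sym.stores
    assert sym.find_ref("x") == "l_0_x"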
class RootVisitor(NodeVisitor):
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = (
visit_Block
) = (
visit_Macro
) = (
visit_FilterBlock
) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(self, node, for_branch="body", **kwargs):
if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == "else":
branch = node.else_
elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError("Unknown for branch")
for item in branch or ():
self.sym_visitor.visit(item)
def visit_With(self, node, **kwargs):
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
raise NotImplementedError(
"Cannot find symbols for %r" % node.__class__.__name__
)
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols):
self.symbols = symbols
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
elif node.ctx == "store":
self.symbols.store(node.name)
elif node.ctx == "load":
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
self.symbols.load(node.name)
def visit_If(self, node, **kwargs):
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes):
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node, **kwargs):
self.symbols.store(node.name)
def visit_Import(self, node, **kwargs):
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node, **kwargs):
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node, **kwargs):
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node, **kwargs):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node, **kwargs):
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node, **kwargs):
self.visit(node.filter, **kwargs)
def visit_With(self, node, **kwargs):
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node, **kwargs):
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node, **kwargs):
"""Stop visiting at scopes."""
def visit_Block(self, node, **kwargs):
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node, **kwargs):
"""Do not visit into overlay scopes."""

841
third_party/python/Jinja2/jinja2/lexer.py vendored Normal file

@@ -0,0 +1,841 @@
# -*- coding: utf-8 -*-
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
is used to do some preprocessing. It filters out invalid operators like
the bitshift operators we don't allow in templates. It separates
template code and python code in expressions.
"""
import re
from ast import literal_eval
from collections import deque
from operator import itemgetter
from ._compat import implements_iterator
from ._compat import intern
from ._compat import iteritems
from ._compat import text_type
from .exceptions import TemplateSyntaxError
from .utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r"\s+", re.U)
newline_re = re.compile(r"(\r\n|\r|\n)")
string_re = re.compile(
r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
)
integer_re = re.compile(r"(\d+_)*\d+")
float_re = re.compile(
r"""
(?<!\.) # doesn't start with a .
(\d+_)*\d+ # digits, possibly _ separated
(
(\.(\d+_)*\d+)? # optional fractional part
e[+\-]?(\d+_)*\d+ # exponent part
|
\.(\d+_)*\d+ # required fractional part
)
""",
re.IGNORECASE | re.VERBOSE,
)
try:
# check if this Python supports Unicode identifiers
compile("föö", "<unknown>", "eval")
except SyntaxError:
# Python 2, no Unicode support, use ASCII identifiers
name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
check_ident = False
else:
# Unicode support, import generated re pattern and set flag to use
# str.isidentifier to validate during lexing.
from ._identifier import pattern as name_re
check_ident = True
# intern the tokens and keep references to them
TOKEN_ADD = intern("add")
TOKEN_ASSIGN = intern("assign")
TOKEN_COLON = intern("colon")
TOKEN_COMMA = intern("comma")
TOKEN_DIV = intern("div")
TOKEN_DOT = intern("dot")
TOKEN_EQ = intern("eq")
TOKEN_FLOORDIV = intern("floordiv")
TOKEN_GT = intern("gt")
TOKEN_GTEQ = intern("gteq")
TOKEN_LBRACE = intern("lbrace")
TOKEN_LBRACKET = intern("lbracket")
TOKEN_LPAREN = intern("lparen")
TOKEN_LT = intern("lt")
TOKEN_LTEQ = intern("lteq")
TOKEN_MOD = intern("mod")
TOKEN_MUL = intern("mul")
TOKEN_NE = intern("ne")
TOKEN_PIPE = intern("pipe")
TOKEN_POW = intern("pow")
TOKEN_RBRACE = intern("rbrace")
TOKEN_RBRACKET = intern("rbracket")
TOKEN_RPAREN = intern("rparen")
TOKEN_SEMICOLON = intern("semicolon")
TOKEN_SUB = intern("sub")
TOKEN_TILDE = intern("tilde")
TOKEN_WHITESPACE = intern("whitespace")
TOKEN_FLOAT = intern("float")
TOKEN_INTEGER = intern("integer")
TOKEN_NAME = intern("name")
TOKEN_STRING = intern("string")
TOKEN_OPERATOR = intern("operator")
TOKEN_BLOCK_BEGIN = intern("block_begin")
TOKEN_BLOCK_END = intern("block_end")
TOKEN_VARIABLE_BEGIN = intern("variable_begin")
TOKEN_VARIABLE_END = intern("variable_end")
TOKEN_RAW_BEGIN = intern("raw_begin")
TOKEN_RAW_END = intern("raw_end")
TOKEN_COMMENT_BEGIN = intern("comment_begin")
TOKEN_COMMENT_END = intern("comment_end")
TOKEN_COMMENT = intern("comment")
TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
TOKEN_LINESTATEMENT_END = intern("linestatement_end")
TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
TOKEN_LINECOMMENT_END = intern("linecomment_end")
TOKEN_LINECOMMENT = intern("linecomment")
TOKEN_DATA = intern("data")
TOKEN_INITIAL = intern("initial")
TOKEN_EOF = intern("eof")
# bind operators to token types
operators = {
"+": TOKEN_ADD,
"-": TOKEN_SUB,
"/": TOKEN_DIV,
"//": TOKEN_FLOORDIV,
"*": TOKEN_MUL,
"%": TOKEN_MOD,
"**": TOKEN_POW,
"~": TOKEN_TILDE,
"[": TOKEN_LBRACKET,
"]": TOKEN_RBRACKET,
"(": TOKEN_LPAREN,
")": TOKEN_RPAREN,
"{": TOKEN_LBRACE,
"}": TOKEN_RBRACE,
"==": TOKEN_EQ,
"!=": TOKEN_NE,
">": TOKEN_GT,
">=": TOKEN_GTEQ,
"<": TOKEN_LT,
"<=": TOKEN_LTEQ,
"=": TOKEN_ASSIGN,
".": TOKEN_DOT,
":": TOKEN_COLON,
"|": TOKEN_PIPE,
",": TOKEN_COMMA,
";": TOKEN_SEMICOLON,
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), "operators dropped"
operator_re = re.compile(
"(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
)
ignored_tokens = frozenset(
[
TOKEN_COMMENT_BEGIN,
TOKEN_COMMENT,
TOKEN_COMMENT_END,
TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT,
]
)
ignore_if_empty = frozenset(
[TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
)
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
TOKEN_COMMENT_BEGIN: "begin of comment",
TOKEN_COMMENT_END: "end of comment",
TOKEN_COMMENT: "comment",
TOKEN_LINECOMMENT: "comment",
TOKEN_BLOCK_BEGIN: "begin of statement block",
TOKEN_BLOCK_END: "end of statement block",
TOKEN_VARIABLE_BEGIN: "begin of print statement",
TOKEN_VARIABLE_END: "end of print statement",
TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
TOKEN_LINESTATEMENT_END: "end of line statement",
TOKEN_DATA: "template data / text",
TOKEN_EOF: "end of template",
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
if token.type == TOKEN_NAME:
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ":" in expr:
type, value = expr.split(":", 1)
if type == TOKEN_NAME:
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value):
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
(
len(environment.comment_start_string),
TOKEN_COMMENT_BEGIN,
e(environment.comment_start_string),
),
(
len(environment.block_start_string),
TOKEN_BLOCK_BEGIN,
e(environment.block_start_string),
),
(
len(environment.variable_start_string),
TOKEN_VARIABLE_BEGIN,
e(environment.variable_start_string),
),
]
if environment.line_statement_prefix is not None:
rules.append(
(
len(environment.line_statement_prefix),
TOKEN_LINESTATEMENT_BEGIN,
r"^[ \t\v]*" + e(environment.line_statement_prefix),
)
)
if environment.line_comment_prefix is not None:
rules.append(
(
len(environment.line_comment_prefix),
TOKEN_LINECOMMENT_BEGIN,
r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
)
)
return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
def __init__(self, message, cls=TemplateSyntaxError):
self.message = message
self.error_class = cls
def __call__(self, lineno, filename):
raise self.error_class(self.message, lineno, filename)
class Token(tuple):
"""Token class."""
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
def __new__(cls, lineno, type, value):
return tuple.__new__(cls, (lineno, intern(str(type)), value))
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
elif self.type == "name":
return self.value
return self.type
def test(self, expr):
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
"""
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of non-interned strings.
if self.type == expr:
return True
elif ":" in expr:
return expr.split(":", 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
"""Test against multiple token expressions."""
for expr in iterable:
if self.test(expr):
return True
return False
def __repr__(self):
return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
@implements_iterator
class TokenStreamIterator(object):
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream):
self.stream = stream
def __iter__(self):
return self
def __next__(self):
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration()
next(self.stream)
return token
@implements_iterator
class TokenStream(object):
"""A token stream is an iterable that yields :class:`Token`\\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(self, generator, name, filename):
self._iter = iter(generator)
self._pushed = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, "")
next(self)
def __iter__(self):
return TokenStreamIterator(self)
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
__nonzero__ = __bool__ # py2
@property
def eos(self):
"""Are we at the end of the stream?"""
return not self
def push(self, token):
"""Push a token back to the stream."""
self._pushed.append(token)
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n=1):
"""Got n tokens ahead."""
for _ in range(n):
next(self)
def next_if(self, expr):
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
def skip_if(self, expr):
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
def __next__(self):
"""Go one token ahead and return the old one.
Use the built-in :func:`next` instead of calling this directly.
"""
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = next(self._iter)
except StopIteration:
self.close()
return rv
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, "")
self._iter = None
self.closed = True
def expect(self, expr):
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError(
"unexpected end of template, expected %r." % expr,
self.current.lineno,
self.name,
self.filename,
)
raise TemplateSyntaxError(
"expected token %r, got %r" % (expr, describe_token(self.current)),
self.current.lineno,
self.name,
self.filename,
)
try:
return self.current
finally:
next(self)
def get_lexer(environment):
"""Return a lexer which is probably cached."""
key = (
environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.lstrip_blocks,
environment.newline_sequence,
environment.keep_trailing_newline,
)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer
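# --- Editor's illustrative sketch (not part of the vendored file) ---
# Drives the cached lexer directly and walks the resulting TokenStream
# with expect(). The helper name `_example_tokenize` is hypothetical.
def _example_tokenize():
    from jinja2 import Environment

    stream = get_lexer(Environment()).tokenize("{{ answer }}")
    stream.expect(TOKEN_VARIABLE_BEGIN)
    # expect() returns the matched token and advances the stream.
    assert stream.expect("name:answer").value == "answer"
    stream.expect(TOKEN_VARIABLE_END)
    assert stream.eos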
class OptionalLStrip(tuple):
"""A special tuple for marking a point in the state that can have
lstrip applied.
"""
__slots__ = ()
# Even though it looks like a no-op, creating instances fails
# without this.
def __new__(cls, *members, **kwargs):
return super(OptionalLStrip, cls).__new__(cls, members)
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
Multiple environments can share the same lexer.
"""
def __init__(self, environment):
# shortcuts
e = re.escape
def c(x):
return re.compile(x, re.M | re.S)
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
(float_re, TOKEN_FLOAT, None),
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
(operator_re, TOKEN_OPERATOR, None),
]
        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for blocks
        # and <%= for variables (if someone wants ASP-like syntax).
        # variables are just part of the rules if variable processing
        # is required.
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and "\\n?" or ""
# If lstrip is enabled, it should not be applied if there is any
# non-whitespace between the newline and block.
self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
"root": [
# directives
(
c(
"(.*?)(?:%s)"
% "|".join(
[
r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
% (
e(environment.block_start_string),
e(environment.block_end_string),
e(environment.block_end_string),
)
]
+ [
r"(?P<%s>%s(\-|\+|))" % (n, r)
for n, r in root_tag_rules
]
)
),
OptionalLStrip(TOKEN_DATA, "#bygroup"),
"#bygroup",
),
# data
(c(".+"), TOKEN_DATA, None),
],
# comments
TOKEN_COMMENT_BEGIN: [
(
c(
r"(.*?)((?:\-%s\s*|%s)%s)"
% (
e(environment.comment_end_string),
e(environment.comment_end_string),
block_suffix_re,
)
),
(TOKEN_COMMENT, TOKEN_COMMENT_END),
"#pop",
),
(c("(.)"), (Failure("Missing end of comment tag"),), None),
],
# blocks
TOKEN_BLOCK_BEGIN: [
(
c(
r"(?:\-%s\s*|%s)%s"
% (
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re,
)
),
TOKEN_BLOCK_END,
"#pop",
),
]
+ tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
(
c(
r"\-%s\s*|%s"
% (
e(environment.variable_end_string),
e(environment.variable_end_string),
)
),
TOKEN_VARIABLE_END,
"#pop",
)
]
+ tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
(
c(
r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
% (
e(environment.block_start_string),
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re,
)
),
OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
"#pop",
),
(c("(.)"), (Failure("Missing end of raw directive"),), None),
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
]
+ tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
(
c(r"(.*?)()(?=\n|$)"),
(TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
"#pop",
)
],
}
def _normalize_newlines(self, value):
"""Called for strings and template data to normalize it to unicode."""
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
"""Calls tokeniter + tokenize and wraps it in a token stream."""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(self, stream, name=None, filename=None):
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == TOKEN_LINESTATEMENT_BEGIN:
token = TOKEN_BLOCK_BEGIN
elif token == TOKEN_LINESTATEMENT_END:
token = TOKEN_BLOCK_END
# we are not interested in those tokens in the parser
elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
continue
elif token == TOKEN_DATA:
value = self._normalize_newlines(value)
elif token == "keyword":
token = value
elif token == TOKEN_NAME:
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
"Invalid character in identifier", lineno, name, filename
)
elif token == TOKEN_STRING:
# try to unescape string
try:
value = (
self._normalize_newlines(value[1:-1])
.encode("ascii", "backslashreplace")
.decode("unicode-escape")
)
except Exception as e:
msg = str(e).split(":")[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
elif token == TOKEN_INTEGER:
value = int(value.replace("_", ""))
elif token == TOKEN_FLOAT:
# remove all "_" first to support more Python versions
value = literal_eval(value.replace("_", ""))
elif token == TOKEN_OPERATOR:
token = operators[value]
yield Token(lineno, token, value)
def tokeniter(self, source, name, filename=None, state=None):
"""This method tokenizes the text and returns the tokens in a
generator. Use this method if you just want to tokenize a template.
"""
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
for newline in ("\r\n", "\r", "\n"):
if source.endswith(newline):
lines.append("")
break
source = "\n".join(lines)
pos = 0
lineno = 1
stack = ["root"]
if state is not None and state != "root":
assert state in ("variable", "block"), "invalid state"
stack.append(state + "_begin")
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack = []
lstrip_unless_re = self.lstrip_unless_re
while 1:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
# if no match we try again with the next rule
if m is None:
continue
# we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
if balancing_stack and tokens in (
TOKEN_VARIABLE_END,
TOKEN_BLOCK_END,
TOKEN_LINESTATEMENT_END,
):
continue
# tuples support more options
if isinstance(tokens, tuple):
groups = m.groups()
if isinstance(tokens, OptionalLStrip):
# Rule supports lstrip. Match will look like
# text, block type, whitespace control, type, control, ...
text = groups[0]
# Skipping the text and first type, every other group is the
# whitespace control for each type. One of the groups will be
# -, +, or empty string instead of None.
strip_sign = next(g for g in groups[2::2] if g is not None)
if strip_sign == "-":
# Strip all whitespace between the text and the tag.
groups = (text.rstrip(),) + groups[1:]
elif (
# Not marked for preserving whitespace.
strip_sign != "+"
# lstrip is enabled.
and lstrip_unless_re is not None
# Not a variable expression.
and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
):
# The start of text between the last newline and the tag.
l_pos = text.rfind("\n") + 1
# If there's only whitespace between the newline and the
# tag, strip it.
if not lstrip_unless_re.search(text, l_pos):
groups = (text[:l_pos],) + groups[1:]
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
raise token(lineno, filename)
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
elif token == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
lineno += value.count("\n")
break
else:
raise RuntimeError(
"%r wanted to resolve "
"the token dynamically"
" but no group matched" % regex
)
# normal group
else:
data = groups[idx]
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count("\n")
                # strings as tokens are just yielded as-is.
else:
data = m.group()
# update brace/parentheses balance
if tokens == TOKEN_OPERATOR:
if data == "{":
balancing_stack.append("}")
elif data == "(":
balancing_stack.append(")")
elif data == "[":
balancing_stack.append("]")
elif data in ("}", ")", "]"):
if not balancing_stack:
raise TemplateSyntaxError(
"unexpected '%s'" % data, lineno, name, filename
)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError(
"unexpected '%s', "
"expected '%s'" % (data, expected_op),
lineno,
name,
filename,
)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
lineno += data.count("\n")
                # fetch the new position into a new variable so that we
                # can check if there is an internal parsing error which
                # would result in an infinite loop
pos2 = m.end()
# handle state changes
if new_state is not None:
# remove the uppermost state
if new_state == "#pop":
stack.pop()
# resolve the new state by group checking
elif new_state == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
raise RuntimeError(
"%r wanted to resolve the "
"new state dynamically but"
" no group matched" % regex
)
# direct state name given
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError(
"%r yielded empty string without stack change" % regex
)
                # publish the new position and start again
pos = pos2
break
# if loop terminated without break we haven't found a single match
# either we are at the end of the file or we have a problem
else:
# end of text
if pos >= source_length:
return
# something went wrong
raise TemplateSyntaxError(
"unexpected char %r at %d" % (source[pos], pos),
lineno,
name,
filename,
)

572
third_party/python/Jinja2/jinja2/loaders.py vendored Normal file

@@ -0,0 +1,572 @@
# -*- coding: utf-8 -*-
"""API and implementations for loading templates from different data
sources.
"""
import os
import pkgutil
import sys
import weakref
from hashlib import sha1
from importlib import import_module
from os import path
from types import ModuleType
from ._compat import abc
from ._compat import fspath
from ._compat import iteritems
from ._compat import string_types
from .exceptions import TemplateNotFound
from .utils import internalcode
from .utils import open_if_exists
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split("/"):
if (
path.sep in piece
or (path.altsep and path.altsep in piece)
or piece == path.pardir
):
raise TemplateNotFound(template)
elif piece and piece != ".":
pieces.append(piece)
return pieces
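# --- Editor's illustrative sketch (not part of the vendored file) ---
# split_template_path drops '.' segments and rejects traversal; the
# helper name `_example_split_template_path` is hypothetical.
def _example_split_template_path():
    assert split_template_path("users/./index.html") == ["users", "index.html"]
    try:
        split_template_path("../secret")
    except TemplateNotFound:
        pass
    else:
        raise AssertionError("'..' must raise TemplateNotFound")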
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
                with open(path, 'rb') as f:
                    source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
        The source part of the returned tuple must be the source of the
        template as a unicode string or an ASCII bytestring. The filename
        should be the name of the file on the filesystem if it was loaded
        from there, otherwise `None`. The filename is used by Python for
        tracebacks if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError(
"%s cannot provide access to the source" % self.__class__.__name__
)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError("this loader cannot iterate over all templates")
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(
environment, code, globals, uptodate
)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
    The loader takes the path to the templates as a string or, if multiple
    locations are wanted, a list of them, which is then searched in the
    given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
    By default the template encoding is ``'utf-8'``, which can be changed
    by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8
The ``followlinks`` parameter was added.
"""
def __init__(self, searchpath, encoding="utf-8", followlinks=False):
if not isinstance(searchpath, abc.Iterable) or isinstance(
searchpath, string_types
):
searchpath = [searchpath]
# In Python 3.5, os.path.join doesn't support Path. This can be
# simplified to list(searchpath) when Python 3.5 is dropped.
self.searchpath = [fspath(p) for p in searchpath]
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, _, filenames in walk_dir:
for filename in filenames:
template = (
os.path.join(dirpath, filename)[len(searchpath) :]
.strip(os.path.sep)
.replace(os.path.sep, "/")
)
if template[:2] == "./":
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from a directory in a Python package.
:param package_name: Import name of the package that contains the
template directory.
:param package_path: Directory within the imported package that
contains the templates.
:param encoding: Encoding of template files.
The following example looks up templates in the ``pages`` directory
within the ``project.ui`` package.
.. code-block:: python
loader = PackageLoader("project.ui", "pages")
Only packages installed as directories (standard pip behavior) or
zip/egg files (less common) are supported. The Python API for
introspecting data in packages is too limited to support other
installation methods the way this loader requires.
There is limited support for :pep:`420` namespace packages. The
template directory is assumed to only be in one namespace
contributor. Zip files contributing to a namespace are not
supported.
.. versionchanged:: 2.11.0
No longer uses ``setuptools`` as a dependency.
.. versionchanged:: 2.11.0
Limited PEP 420 namespace package support.
"""
def __init__(self, package_name, package_path="templates", encoding="utf-8"):
if package_path == os.path.curdir:
package_path = ""
elif package_path[:2] == os.path.curdir + os.path.sep:
package_path = package_path[2:]
package_path = os.path.normpath(package_path).rstrip(os.path.sep)
self.package_path = package_path
self.package_name = package_name
self.encoding = encoding
# Make sure the package exists. This also makes namespace
# packages work, otherwise get_loader returns None.
import_module(package_name)
self._loader = loader = pkgutil.get_loader(package_name)
# Zip loader's archive attribute points at the zip.
self._archive = getattr(loader, "archive", None)
self._template_root = None
if hasattr(loader, "get_filename"):
# A standard directory package, or a zip package.
self._template_root = os.path.join(
os.path.dirname(loader.get_filename(package_name)), package_path
)
elif hasattr(loader, "_path"):
# A namespace package, limited support. Find the first
# contributor with the template directory.
for root in loader._path:
root = os.path.join(root, package_path)
if os.path.isdir(root):
self._template_root = root
break
if self._template_root is None:
raise ValueError(
"The %r package was not installed in a way that"
" PackageLoader understands." % package_name
)
def get_source(self, environment, template):
p = os.path.join(self._template_root, *split_template_path(template))
if self._archive is None:
# Package is a directory.
if not os.path.isfile(p):
raise TemplateNotFound(template)
with open(p, "rb") as f:
source = f.read()
mtime = os.path.getmtime(p)
def up_to_date():
return os.path.isfile(p) and os.path.getmtime(p) == mtime
else:
# Package is a zip file.
try:
source = self._loader.get_data(p)
except OSError:
raise TemplateNotFound(template)
# Could use the zip's mtime for all template mtimes, but
# would need to safely reload the module if it's out of
# date, so just report it as always current.
up_to_date = None
return source.decode(self.encoding), p, up_to_date
def list_templates(self):
results = []
if self._archive is None:
# Package is a directory.
offset = len(self._template_root)
for dirpath, _, filenames in os.walk(self._template_root):
dirpath = dirpath[offset:].lstrip(os.path.sep)
results.extend(
os.path.join(dirpath, name).replace(os.path.sep, "/")
for name in filenames
)
else:
if not hasattr(self._loader, "_files"):
raise TypeError(
"This zip import does not have the required"
" metadata to list templates."
)
# Package is a zip file.
prefix = (
self._template_root[len(self._archive) :].lstrip(os.path.sep)
+ os.path.sep
)
offset = len(prefix)
for name in self._loader._files.keys():
# Find names under the templates directory that aren't directories.
if name.startswith(prefix) and name[-1] != os.path.sep:
results.append(name[offset:].replace(os.path.sep, "/"))
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
an unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
    Loading ``'app1/index.html'`` gets the file from the app1 package;
    loading ``'app2/index.html'`` gets it from the second.
"""
def __init__(self, mapping, delimiter="/"):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = "_jinja2_module_templates_%x" % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
path = [path]
mod.__path__ = [fspath(p) for p in path]
sys.modules[package_name] = weakref.proxy(
mod, lambda x: sys.modules.pop(package_name, None)
)
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + ".py"
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = "%s.%s" % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ["root"])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals
)
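# --- Editor's illustrative sketch (not part of the vendored file) ---
# Round-trips a template through Environment.compile_templates and loads
# it back with ModuleLoader. The helper name `_example_module_loader`
# and the throwaway temp directory are illustrative only.
def _example_module_loader():
    import tempfile

    from jinja2 import DictLoader, Environment

    target = tempfile.mkdtemp()
    source_env = Environment(loader=DictLoader({"index.html": "Hello {{ name }}"}))
    source_env.compile_templates(target, zip=None)  # plain .py modules
    compiled_env = Environment(loader=ModuleLoader(target))
    assert compiled_env.get_template("index.html").render(name="X") == "Hello X"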

101
third_party/python/Jinja2/jinja2/meta.py vendored Normal file

@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
"""Functions that expose information about templates that might be
interesting for introspection.
"""
from . import nodes
from ._compat import iteritems
from ._compat import string_types
from .compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
    This function is useful for dependency tracking; for example, you can
    use it to rebuild parts of the website after a layout template changes.
"""
for node in ast.find_all(
(nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and isinstance(
node.template.value, (tuple, list)
):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None

111
third_party/python/Jinja2/jinja2/nativetypes.py vendored Normal file

@@ -0,0 +1,111 @@
import types
from ast import literal_eval
from itertools import chain
from itertools import islice
from . import nodes
from ._compat import text_type
from .compiler import CodeGenerator
from .compiler import has_safe_repr
from .environment import Environment
from .environment import Template
def native_concat(nodes, preserve_quotes=True):
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise,
the string is returned.
:param nodes: Iterable of nodes to concatenate.
:param preserve_quotes: Whether to re-wrap literal strings with
quotes, to preserve quotes around expressions for later parsing.
Should be ``False`` in :meth:`NativeEnvironment.render`.
"""
head = list(islice(nodes, 2))
if not head:
return None
if len(head) == 1:
raw = head[0]
else:
if isinstance(nodes, types.GeneratorType):
nodes = chain(head, nodes)
raw = u"".join([text_type(v) for v in nodes])
try:
literal = literal_eval(raw)
except (ValueError, SyntaxError, MemoryError):
return raw
# If literal_eval returned a string, re-wrap with the original
# quote character to avoid dropping quotes between expression nodes.
# Without this, "'{{ a }}', '{{ b }}'" results in "a, b", but should
# be ('a', 'b').
if preserve_quotes and isinstance(literal, str):
return "{quote}{}{quote}".format(literal, quote=raw[0])
return literal
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
``to_string()`` around output nodes, and using :func:`native_concat`
to convert complex strings back to Python types if possible.
"""
@staticmethod
def _default_finalize(value):
return value
def _output_const_repr(self, group):
return repr(native_concat(group))
def _output_child_to_const(self, node, frame, finalize):
const = node.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
if isinstance(node, nodes.TemplateData):
return const
return finalize.const(const)
def _output_child_pre(self, node, frame, finalize):
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(self, node, frame, finalize):
if finalize.src is not None:
self.write(")")
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
class NativeTemplate(Template):
environment_class = NativeEnvironment
def render(self, *args, **kwargs):
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(
self.root_render_func(self.new_context(vars)), preserve_quotes=False
)
except Exception:
return self.environment.handle_exception()
NativeEnvironment.template_class = NativeTemplate
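
A short sketch of what the file above buys you (editorial example): rendering through a NativeEnvironment returns native Python objects instead of strings.

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    # A single output node is returned as-is, so the result is an int,
    # not the string "6".
    print(type(result), result)  # <class 'int'> 6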

third_party/python/Jinja2/jinja2/nodes.py (vendored, new file, 1088 lines)
Diff not shown because of its size.

third_party/python/Jinja2/jinja2/optimizer.py (vendored, new file)
@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
"""The optimizer tries to constant fold expressions and modify the AST
in place so that it should be faster to evaluate.
Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example, loop unrolling doesn't work because unrolled loops
would have a different scope. The solution would be a second syntax tree
that stored the scoping rules.
"""
from . import nodes
from .visitor import NodeTransformer
def optimize(node, environment):
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
return optimizer.visit(node)
class Optimizer(NodeTransformer):
def __init__(self, environment):
self.environment = environment
def generic_visit(self, node, *args, **kwargs):
node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
# Do constant folding. Some other nodes besides Expr have
# as_const, but folding them causes errors later on.
if isinstance(node, nodes.Expr):
try:
return nodes.Const.from_untrusted(
node.as_const(args[0] if args else None),
lineno=node.lineno,
environment=self.environment,
)
except nodes.Impossible:
pass
return node
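
A hedged sketch of the optimizer at work (editorial example): constant expressions in a parsed template are folded into Const nodes.

    from jinja2 import Environment, nodes
    from jinja2.optimizer import optimize

    env = Environment()
    ast = env.parse("{{ 1 + 2 }}")
    folded = optimize(ast, env)
    # The Add node has been replaced by a single Const node.
    print(next(iter(folded.find_all(nodes.Const))).value)  # 3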

third_party/python/Jinja2/jinja2/parser.py (vendored, new file)
@@ -0,0 +1,939 @@
# -*- coding: utf-8 -*-
"""Parse tokens from the lexer into nodes for the compiler."""
from . import nodes
from ._compat import imap
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .lexer import describe_token
from .lexer import describe_token_expr
_statement_keywords = frozenset(
[
"for",
"if",
"block",
"extends",
"print",
"macro",
"include",
"from",
"import",
"set",
"with",
"autoescape",
]
)
_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
_math_nodes = {
"add": nodes.Add,
"sub": nodes.Sub,
"mul": nodes.Mul,
"div": nodes.Div,
"floordiv": nodes.FloorDiv,
"mod": nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None, state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = " or ".join(
"'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
)
else:
currently_looking = None
if name is None:
message = ["Unexpected end of template."]
else:
message = ["Encountered unknown tag '%s'." % name]
if currently_looking:
if name is not None and name in expected:
message.append(
"You probably made a nesting mistake. Jinja "
"is expecting this tag, but currently looking "
"for %s." % currently_looking
)
else:
message.append(
"Jinja was looking for the following tags: "
"%s." % currently_looking
)
if self._tag_stack:
message.append(
"The innermost block that needs to be "
"closed is '%s'." % self._tag_stack[-1]
)
self.fail(" ".join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ("variable_end", "block_end", "rparen"):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != "name":
self.fail("tag name expected", token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, "parse_" + self.stream.current.value)()
if token.value == "call":
return self.parse_call_block()
if token.value == "filter":
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until one of the `end_tokens` is
reached. By default the active token in the stream at the end of
the call is the matched end token. If this is not wanted, `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if("colon")
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect("block_end")
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == "eof":
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if("assign"):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(("name:endset",), drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect("name:for").lineno
target = self.parse_assign_target(extra_end_rules=("name:in",))
self.stream.expect("name:in")
iter = self.parse_tuple(
with_condexpr=False, extra_end_rules=("name:recursive",)
)
test = None
if self.stream.skip_if("name:if"):
test = self.parse_expression()
recursive = self.stream.skip_if("name:recursive")
body = self.parse_statements(("name:endfor", "name:else"))
if next(self.stream).value == "endfor":
else_ = []
else:
else_ = self.parse_statements(("name:endfor",), drop_needle=True)
return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test("name:elif"):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test("name:else"):
result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != "block_end":
if targets:
self.stream.expect("comma")
target = self.parse_assign_target()
target.set_ctx("param")
targets.append(target)
self.stream.expect("assign")
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(("name:endwith",), drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [nodes.Keyword("autoescape", self.parse_expression())]
node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect("name").value
node.scoped = self.stream.skip_if("name:scoped")
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == "sub":
self.fail(
"Block names in Jinja have to be valid Python "
"identifiers and may not contain hyphens, use an "
"underscore instead."
)
node.body = self.parse_statements(("name:endblock",), drop_needle=True)
self.stream.skip_if("name:" + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any(
"name:with", "name:without"
) and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test("name:ignore") and self.stream.look().test(
"name:missing"
):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:as")
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:import")
node.names = []
def parse_context():
if self.stream.current.value in (
"with",
"without",
) and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect("comma")
if self.stream.current.type == "name":
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith("_"):
self.fail(
"names starting with an underline can not be imported",
target.lineno,
exc=TemplateAssertionError,
)
if self.stream.skip_if("name:as"):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != "comma":
break
else:
self.stream.expect("name")
if not hasattr(node, "with_context"):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect("lparen")
while self.stream.current.type != "rparen":
if args:
self.stream.expect("comma")
arg = self.parse_assign_target(name_only=True)
arg.set_ctx("param")
if self.stream.skip_if("assign"):
defaults.append(self.parse_expression())
elif defaults:
self.fail("non-default argument follows default argument")
args.append(arg)
self.stream.expect("rparen")
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == "lparen":
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail("expected call", node.lineno)
node.body = self.parse_statements(("name:endcall",), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != "block_end":
if node.nodes:
self.stream.expect("comma")
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(
self,
with_tuple=True,
name_only=False,
extra_end_rules=None,
with_namespace=False,
):
"""Parse an assignment target. As Jinja allows assignments to
tuples, this function can parse all allowed assignment targets. By
default assignments to tuples are parsed; that can be disabled by
setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == "dot":
token = self.stream.expect("name")
next(self.stream) # dot
attr = self.stream.expect("name")
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect("name")
target = nodes.Name(token.value, "store", lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(
simplified=True, extra_end_rules=extra_end_rules
)
else:
target = self.parse_primary()
target.set_ctx("store")
if not target.can_assign():
self.fail(
"can't assign to %r" % target.__class__.__name__.lower(), target.lineno
)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if("name:if"):
expr2 = self.parse_or()
if self.stream.skip_if("name:else"):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if("name:or"):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if("name:and"):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test("name:not"):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if("name:in"):
ops.append(nodes.Operand("in", self.parse_math1()))
elif self.stream.current.test("name:not") and self.stream.look().test(
"name:in"
):
self.stream.skip(2)
ops.append(nodes.Operand("notin", self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ("add", "sub"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == "tilde":
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == "pow":
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == "sub":
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == "add":
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == "name":
if token.value in ("true", "false", "True", "False"):
node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
elif token.value in ("none", "None"):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, "load", lineno=token.lineno)
next(self.stream)
elif token.type == "string":
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == "string":
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const("".join(buf), lineno=lineno)
elif token.type in ("integer", "float"):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == "lparen":
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect("rparen")
elif token.type == "lbracket":
node = self.parse_list()
elif token.type == "lbrace":
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(
self,
simplified=False,
with_condexpr=True,
extra_end_rules=None,
explicit_parentheses=False,
):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method can also return a single expression instead of a tuple
if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
def parse():
return self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect("comma")
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == "comma":
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail(
"Expected an expression, got '%s'"
% describe_token(self.stream.current)
)
return nodes.Tuple(args, "load", lineno=lineno)
def parse_list(self):
token = self.stream.expect("lbracket")
items = []
while self.stream.current.type != "rbracket":
if items:
self.stream.expect("comma")
if self.stream.current.type == "rbracket":
break
items.append(self.parse_expression())
self.stream.expect("rbracket")
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect("lbrace")
items = []
while self.stream.current.type != "rbrace":
if items:
self.stream.expect("comma")
if self.stream.current.type == "rbrace":
break
key = self.parse_expression()
self.stream.expect("colon")
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect("rbrace")
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == "dot" or token_type == "lbracket":
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == "lparen":
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == "pipe":
node = self.parse_filter(node)
elif token_type == "name" and self.stream.current.value == "is":
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == "lparen":
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == "dot":
attr_token = self.stream.current
next(self.stream)
if attr_token.type == "name":
return nodes.Getattr(
node, attr_token.value, "load", lineno=token.lineno
)
elif attr_token.type != "integer":
self.fail("expected name or number", attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
if token.type == "lbracket":
args = []
while self.stream.current.type != "rbracket":
if args:
self.stream.expect("comma")
args.append(self.parse_subscribed())
self.stream.expect("rbracket")
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, "load", lineno=token.lineno)
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
self.fail("expected subscript expression", token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == "colon":
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != "colon":
return node
next(self.stream)
args = [node]
if self.stream.current.type == "colon":
args.append(None)
elif self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == "colon":
next(self.stream)
if self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect("lparen")
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail("invalid syntax for function call expression", token.lineno)
while self.stream.current.type != "rparen":
if require_comma:
self.stream.expect("comma")
# support for trailing comma
if self.stream.current.type == "rparen":
break
if self.stream.current.type == "mul":
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == "pow":
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
if (
self.stream.current.type == "name"
and self.stream.look().type == "assign"
):
# Parsing a kwarg
ensure(dyn_kwargs is None)
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
else:
# Parsing an arg
ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect("rparen")
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == "pipe" or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect("name")
name = token.value
while self.stream.current.type == "dot":
next(self.stream)
name += "." + self.stream.expect("name").value
if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(
node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test("name:not"):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect("name").value
while self.stream.current.type == "dot":
next(self.stream)
name += "." + self.stream.expect("name").value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in (
"name",
"string",
"integer",
"float",
"lparen",
"lbracket",
"lbrace",
) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
if self.stream.current.test("name:is"):
self.fail("You cannot chain multiple tests with is")
arg_node = self.parse_primary()
arg_node = self.parse_postfix(arg_node)
args = [arg_node]
else:
args = []
node = nodes.Test(
node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == "data":
if token.value:
add_data(nodes.TemplateData(token.value, lineno=token.lineno))
next(self.stream)
elif token.type == "variable_begin":
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect("variable_end")
elif token.type == "block_begin":
flush_data()
next(self.stream)
if end_tokens is not None and self.stream.current.test_any(
*end_tokens
):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect("block_end")
else:
raise AssertionError("internal parsing error")
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
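
A small editorial sketch (not part of the vendored file) of the parser in action, driven through Environment.parse:

    from jinja2 import Environment, nodes

    env = Environment()
    ast = env.parse("{% for item in seq %}{{ item }}{% endfor %}")
    # parse_for() produced a For node whose target is a 'store' Name and
    # whose iterable is a 'load' Name.
    for_node = next(iter(ast.find_all(nodes.For)))
    print(for_node.target.name, for_node.iter.name)  # item seq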

third_party/python/Jinja2/jinja2/runtime.py (vendored, new file, 1011 lines)
Diff not shown because of its size.

third_party/python/Jinja2/jinja2/sandbox.py (vendored, new file)
@@ -0,0 +1,510 @@
# -*- coding: utf-8 -*-
"""A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
import warnings
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from ._compat import abc
from ._compat import PY2
from ._compat import range_type
from ._compat import string_types
from .environment import Environment
from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
UNSAFE_FUNCTION_ATTRIBUTES = {
"func_closure",
"func_code",
"func_dict",
"func_defaults",
"func_globals",
}
else:
# On Python 3 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings(
"ignore", "the sets module", DeprecationWarning, module=__name__
)
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
# UserList is a mutable sequence; register it with the sequence types
# so that mutating methods such as "append" are caught.
_mutable_sequence_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
_mutable_set_types += (abc.MutableSet,)
_mutable_mapping_types += (abc.MutableMapping,)
_mutable_sequence_types += (abc.MutableSequence,)
_mutable_spec = (
(
_mutable_set_types,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
_mutable_mapping_types,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
_mutable_sequence_types,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
class _MagicFormatMapping(abc.Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See https://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == "":
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
def inspect_format_method(callable):
if not isinstance(
callable, (types.MethodType, types.BuiltinMethodType)
) or callable.__name__ not in ("format", "format_map"):
return None
obj = callable.__self__
if isinstance(obj, string_types):
return obj
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range_type(*args)
if len(rng) > MAX_RANGE:
raise OverflowError(
"Range too big. The sandbox blocks ranges larger than"
" MAX_RANGE (%d)." % MAX_RANGE
)
return rng
def unsafe(f):
"""Marks a function or method as unsafe.
::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == "mro":
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
elif hasattr(types, "AsyncGeneratorType") and isinstance(
obj, types.AsyncGeneratorType
):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
return attr.startswith("__")
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {"+": operator.pos, "-": operator.neg}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. By default all attributes
starting with an underscore are considered private, as are the
special attributes of internal Python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
)
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined(
"access to attribute %r of %r "
"object is unsafe." % (attribute, obj.__class__.__name__),
name=attribute,
obj=obj,
exc=SecurityError,
)
def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
if isinstance(s, Markup):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
if format_func is not None and format_func.__name__ == "format_map":
if len(args) != 1 or kwargs:
raise TypeError(
"format_map() takes exactly one argument %d given"
% (len(args) + (kwargs is not None))
)
kwargs = args[0]
args = None
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError("%r is not safely callable" % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
# This really is not a public API apparently.
try:
from _string import formatter_field_name_split
except ImportError:
def formatter_field_name_split(field_name):
return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
def __init__(self, env):
self._env = env
def get_field(self, field_name, args, kwargs):
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = self._env.getattr(obj, i)
else:
obj = self._env.getitem(obj, i)
return obj, first
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)
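
Two editorial examples (not part of the vendored file) of the sandbox rejecting unsafe templates: an unsafe attribute chain raises SecurityError, and safe_range blocks oversized ranges.

    from jinja2.sandbox import SandboxedEnvironment
    from jinja2.exceptions import SecurityError

    env = SandboxedEnvironment()

    try:
        env.from_string("{{ ''.__class__.__mro__ }}").render()
    except SecurityError as exc:
        # access to attribute '__class__' of 'str' object is unsafe.
        print(exc)

    try:
        env.from_string("{{ range(10 ** 9) }}").render()
    except OverflowError as exc:
        print(exc)  # Range too big. ...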

third_party/python/Jinja2/jinja2/tests.py (vendored, new file)
@@ -0,0 +1,215 @@
# -*- coding: utf-8 -*-
"""Built-in template tests used with the ``is`` operator."""
import decimal
import operator
import re
from ._compat import abc
from ._compat import integer_types
from ._compat import string_types
from ._compat import text_type
from .runtime import Undefined
number_re = re.compile(r"^-?\d+(\.\d+)?$")
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_boolean(value):
"""Return true if the object is a boolean value.
.. versionadded:: 2.11
"""
return value is True or value is False
def test_false(value):
"""Return true if the object is False.
.. versionadded:: 2.11
"""
return value is False
def test_true(value):
"""Return true if the object is True.
.. versionadded:: 2.11
"""
return value is True
# NOTE: The existing 'number' test matches booleans and floats
def test_integer(value):
"""Return true if the object is an integer.
.. versionadded:: 2.11
"""
return isinstance(value, integer_types) and value is not True and value is not False
# NOTE: The existing 'number' test matches booleans and integers
def test_float(value):
"""Return true if the object is a float.
.. versionadded:: 2.11
"""
return isinstance(value, float)
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return text_type(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, string_types)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, abc.Mapping)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, integer_types + (float, complex, decimal.Decimal))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except Exception:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, "__html__")
def test_in(value, seq):
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
TESTS = {
"odd": test_odd,
"even": test_even,
"divisibleby": test_divisibleby,
"defined": test_defined,
"undefined": test_undefined,
"none": test_none,
"boolean": test_boolean,
"false": test_false,
"true": test_true,
"integer": test_integer,
"float": test_float,
"lower": test_lower,
"upper": test_upper,
"string": test_string,
"mapping": test_mapping,
"number": test_number,
"sequence": test_sequence,
"iterable": test_iterable,
"callable": test_callable,
"sameas": test_sameas,
"escaped": test_escaped,
"in": test_in,
"==": operator.eq,
"eq": operator.eq,
"equalto": operator.eq,
"!=": operator.ne,
"ne": operator.ne,
">": operator.gt,
"gt": operator.gt,
"greaterthan": operator.gt,
"ge": operator.ge,
">=": operator.ge,
"<": operator.lt,
"lt": operator.lt,
"lessthan": operator.lt,
"<=": operator.le,
"le": operator.le,
}
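
An editorial sketch of these tests in use with the ``is`` operator:

    from jinja2 import Environment

    env = Environment()
    print(env.from_string("{{ 42 is divisibleby 7 }}").render())  # True
    print(env.from_string("{{ value is defined }}").render())     # False
    print(env.from_string("{{ 3.14 is float }}").render())        # True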

third_party/python/Jinja2/jinja2/utils.py (vendored, new file)
@@ -0,0 +1,727 @@
# -*- coding: utf-8 -*-
import json
import os
import re
import warnings
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from markupsafe import escape
from markupsafe import Markup
from ._compat import abc
from ._compat import string_types
from ._compat import text_type
from ._compat import url_quote
_word_split_re = re.compile(r"(\s+)")
_punctuation_re = re.compile(
"^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$"
% (
"|".join(map(re.escape, ("(", "<", "&lt;"))),
"|".join(map(re.escape, (".", ",", ")", ">", "\n", "&gt;"))),
)
)
_simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$")
_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
_entity_re = re.compile(r"&([^;]+);")
_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
_digits = "0123456789"
# special singleton representing missing values for the runtime
missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
# internal code
internal_code = set()
concat = u"".join
_slash_escape = "\\/" not in json.dumps("/")
def contextfunction(f):
"""This decorator can be used to mark a function or method context callable.
A context callable is passed the active :class:`Context` as first argument when
called from the template. This is useful if a function wants to get access
to the context or functions provided on the context object. For example
a function that returns a sorted list of template variables the current
template exports could look like this::
@contextfunction
def get_exported_names(context):
return sorted(context.exported_vars)
"""
f.contextfunction = True
return f
def evalcontextfunction(f):
"""This decorator can be used to mark a function or method as an eval
context callable. This is similar to the :func:`contextfunction`
but instead of passing the context, an evaluation context object is
passed. For more information about the eval context, see
:ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfunction = True
return f
def environmentfunction(f):
"""This decorator can be used to mark a function or method as environment
callable. This decorator works exactly like the :func:`contextfunction`
decorator just that the first argument is the active :class:`Environment`
and not context.
"""
f.environmentfunction = True
return f
def internalcode(f):
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj):
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from .runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
for _ in iterable:
pass
def clear_caches():
"""Jinja keeps internal caches for environments and lexers. These are
used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from .environment import _spontaneous_environments
from .lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True, the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ":" in import_name:
module, obj = import_name.split(":", 1)
elif "." in import_name:
module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
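# Editorial example (not part of the vendored file): both notations
# resolve to the same object.
#
#     import_string("xml.sax.saxutils:escape")
#     import_string("xml.sax.saxutils.escape")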
def open_if_exists(filename, mode="rb"):
"""Returns a file descriptor for the filename if that file exists,
otherwise ``None``.
"""
if not os.path.isfile(filename):
return None
return open(filename, mode)
def object_type_repr(obj):
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return "None"
elif obj is Ellipsis:
return "Ellipsis"
# __builtin__ in 2.x, builtins in 3.x
if obj.__class__.__module__ in ("__builtin__", "builtins"):
name = obj.__class__.__name__
else:
name = obj.__class__.__module__ + "." + obj.__class__.__name__
return "%s object" % name
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
def urlize(text, trim_url_limit=None, rel=None, target=None):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If rel is not None, the links will get a ``rel`` attribute with the
given value (for example ``"nofollow"``).
If target is not None, a target attribute will be added to the link.
"""
trim_url = (
lambda x, limit=trim_url_limit: limit is not None
and (x[:limit] + (len(x) >= limit and "..." or ""))
or x
)
words = _word_split_re.split(text_type(escape(text)))
rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
target_attr = target and ' target="%s"' % escape(target) or ""
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith("www.") or (
"@" not in middle
and not middle.startswith("http://")
and not middle.startswith("https://")
and len(middle) > 0
and middle[0] in _letters + _digits
and (
middle.endswith(".org")
or middle.endswith(".net")
or middle.endswith(".com")
)
):
middle = '<a href="http://%s"%s%s>%s</a>' % (
middle,
rel_attr,
target_attr,
trim_url(middle),
)
if middle.startswith("http://") or middle.startswith("https://"):
middle = '<a href="%s"%s%s>%s</a>' % (
middle,
rel_attr,
target_attr,
trim_url(middle),
)
if (
"@" in middle
and not middle.startswith("www.")
and ":" not in middle
and _simple_email_re.match(middle)
):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u"".join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
from .constants import LOREM_IPSUM_WORDS
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph contains between `min` and `max` words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p = u" ".join(p)
if p.endswith(","):
p = p[:-1] + "."
elif not p.endswith("."):
p += "."
result.append(p)
if not html:
return u"\n\n".join(result)
return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
def unicode_urlencode(obj, charset="utf-8", for_qs=False):
"""Quote a string for use in a URL using the given charset.
This function is misnamed; it is a wrapper around
:func:`urllib.parse.quote`.
:param obj: String or bytes to quote. Other types are converted to
string then encoded to bytes using the given charset.
:param charset: Encode text to bytes using this charset.
:param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = b"" if for_qs else b"/"
rv = url_quote(obj, safe)
if not isinstance(rv, text_type):
rv = rv.decode("utf-8")
if for_qs:
rv = rv.replace("%20", "+")
return rv
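# Editorial example (not part of the vendored file): for_qs also quotes
# "/" and turns spaces into "+".
#
#     unicode_urlencode(u"hello world/x")               -> 'hello%20world/x'
#     unicode_urlencode(u"hello world/x", for_qs=True)  -> 'hello+world%2Fx'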
class LRUCache(object):
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity):
self.capacity = capacity
self._mapping = {}
self._queue = deque()
self._postinit()
def _postinit(self):
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self):
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d):
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self):
return (self.capacity,)
def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
self._wlock.release()
def __contains__(self, key):
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self):
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self._mapping)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
finally:
self._wlock.release()
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release()
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
pass
finally:
self._wlock.release()
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def iteritems(self):
"""Iterate over all items."""
warnings.warn(
"'iteritems()' will be removed in version 3.0. Use"
" 'iter(cache.items())' instead.",
DeprecationWarning,
stacklevel=2,
)
return iter(self.items())
def values(self):
"""Return a list of all values."""
return [x[1] for x in self.items()]
def itervalue(self):
"""Iterate over all values."""
warnings.warn(
"'itervalue()' will be removed in version 3.0. Use"
" 'iter(cache.values())' instead.",
DeprecationWarning,
stacklevel=2,
)
return iter(self.values())
def itervalues(self):
"""Iterate over all values."""
warnings.warn(
"'itervalues()' will be removed in version 3.0. Use"
" 'iter(cache.values())' instead.",
DeprecationWarning,
stacklevel=2,
)
return iter(self.values())
def keys(self):
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
warnings.warn(
"'iterkeys()' will be removed in version 3.0. Use"
" 'iter(cache.keys())' instead.",
DeprecationWarning,
stacklevel=2,
)
return iter(self)
def __iter__(self):
return reversed(tuple(self._queue))
def __reversed__(self):
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
abc.MutableMapping.register(LRUCache)
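# Illustrative sketch (not part of the upstream module): the cache acts as a
# size-bounded mapping that evicts the least recently used key on overflow.
#
#     cache = LRUCache(2)
#     cache["a"] = 1
#     cache["b"] = 2
#     cache["a"]            # touching "a" makes "b" the LRU entry
#     cache["c"] = 3        # evicts "b"
#     assert "b" not in cache and cache.keys() == ["c", "a"]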
def select_autoescape(
enabled_extensions=("html", "htm", "xml"),
disabled_extensions=(),
default_for_string=True,
default=False,
):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
`enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
an iterable of all the extensions it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case-insensitively.
.. versionadded:: 2.9
"""
enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
def autoescape(template_name):
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
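# Illustrative sketch (not part of the upstream module): behavior of the
# returned callable with the default arguments.
#
#     fn = select_autoescape()
#     fn("index.html")  # -> True  (matches an enabled extension)
#     fn("notes.txt")   # -> False (no match, falls back to `default`)
#     fn(None)          # -> True  (string template, `default_for_string`)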
def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double-quoted attributes. In that case, single-quote
your attributes or HTML-escape the value in addition.
"""
if dumper is None:
dumper = json.dumps
rv = (
dumper(obj, **kwargs)
.replace(u"<", u"\\u003c")
.replace(u">", u"\\u003e")
.replace(u"&", u"\\u0026")
.replace(u"'", u"\\u0027")
)
return Markup(rv)
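# Illustrative sketch (not part of the upstream module), assuming the default
# json.dumps dumper:
#
#     htmlsafe_json_dumps(u"</script>")
#     # -> Markup containing '"\u003c/script\u003e"' (literal \u escapes,
#     #    so the payload cannot close a surrounding <script> tag)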
class Cycler(object):
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
Similar to ``loop.cycle``, but can be used outside loops or across
multiple loops. For example, render a list of folders and files in a
list, alternating giving them "odd" and "even" classes.
.. code-block:: html+jinja
{% set row_class = cycler("odd", "even") %}
<ul class="browser">
{% for folder in folders %}
<li class="folder {{ row_class.next() }}">{{ folder }}
{% endfor %}
{% for file in files %}
<li class="file {{ row_class.next() }}">{{ file }}
{% endfor %}
</ul>
:param items: Each positional argument will be yielded in the order
given for each cycle.
.. versionadded:: 2.1
"""
def __init__(self, *items):
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
def reset(self):
"""Resets the current item to the first item."""
self.pos = 0
@property
def current(self):
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
return self.items[self.pos]
def next(self):
"""Return the current item, then advance :attr:`current` to the
next item.
"""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
__next__ = next
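# Illustrative sketch (not part of the upstream module):
#
#     c = Cycler("odd", "even")
#     c.next(), c.next(), c.next()  # -> ("odd", "even", "odd")
#     c.current                     # -> "even"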
class Joiner(object):
"""A joining helper for templates."""
def __init__(self, sep=u", "):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
return u""
return self.sep
class Namespace(object):
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args, **kwargs): # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
if name == "_Namespace__attrs":
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
except KeyError:
raise AttributeError(name)
def __setitem__(self, name, value):
self.__attrs[name] = value
def __repr__(self):
return "<Namespace %r>" % self.__attrs
# does this Python version support "async for" and async generators?
try:
exec("async def _():\n async for _ in ():\n yield _")
have_async_gen = True
except SyntaxError:
have_async_gen = False
def soft_unicode(s):
from markupsafe import soft_unicode
warnings.warn(
"'jinja2.utils.soft_unicode' will be removed in version 3.0."
" Use 'markupsafe.soft_unicode' instead.",
DeprecationWarning,
stacklevel=2,
)
return soft_unicode(s)

81
third_party/python/Jinja2/jinja2/visitor.py vendored Normal file
View File

@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
"""API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
from .nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
By default the visitor function for a node is ``'visit_'`` + the
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = "visit_" + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location; otherwise it's replaced with the return
value. The return value may be the original node, in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
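# Illustrative sketch (not part of the upstream module): a visitor that counts
# Name nodes in a parsed template AST (assumes a default Environment).
#
#     from jinja2 import Environment
#
#     class NameCounter(NodeVisitor):
#         def __init__(self):
#             self.count = 0
#
#         def visit_Name(self, node, *args, **kwargs):
#             self.count += 1
#             self.generic_visit(node, *args, **kwargs)
#
#     counter = NameCounter()
#     counter.visit(Environment().parse("{{ a + b }}"))
#     counter.count  # -> 2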

97
third_party/python/MarkupSafe/CHANGES.rst vendored Normal file
View File

@ -0,0 +1,97 @@
Version 1.1.1
-------------
Released 2019-02-23
- Fix segfault when ``__html__`` method raises an exception when using
the C speedups. The exception is now propagated correctly. (`#109`_)
.. _#109: https://github.com/pallets/markupsafe/pull/109
Version 1.1.0
-------------
Released 2018-11-05
- Drop support for Python 2.6 and 3.3.
- Build wheels for Linux, Mac, and Windows, allowing systems without
a compiler to take advantage of the C extension speedups. (`#104`_)
- Use newer CPython API on Python 3, resulting in a 1.5x speedup.
(`#64`_)
- ``escape`` wraps ``__html__`` result in ``Markup``, consistent with
documented behavior. (`#69`_)
.. _#64: https://github.com/pallets/markupsafe/pull/64
.. _#69: https://github.com/pallets/markupsafe/pull/69
.. _#104: https://github.com/pallets/markupsafe/pull/104
Version 1.0
-----------
Released 2017-03-07
- Fixed custom types not invoking ``__unicode__`` when used with
``format()``.
- Added ``__version__`` module attribute.
- Improve unescape code to leave lone ampersands alone.
Version 0.18
------------
Released 2013-05-22
- Fixed ``__mul__`` and string splitting on Python 3.
Version 0.17
------------
Released 2013-05-21
- Fixed a bug with broken interpolation on tuples.
Version 0.16
------------
Released 2013-05-20
- Improved Python 3 support and removed 2to3.
- Removed support for Python 3.2 and 2.5.
Version 0.15
------------
Released 2011-07-20
- Fixed a typo that caused the library to fail to install on pypy and
jython.
Version 0.14
------------
Released 2011-07-20
- Release fix for 0.13.
Version 0.13
------------
Released 2011-07-20
- Do not attempt to compile extension for PyPy or Jython.
- Work around some 64bit Windows issues.
Version 0.12
------------
Released 2011-02-17
- Improved PyPy compatibility.

28
third_party/python/MarkupSafe/LICENSE.rst vendored Normal file
View File

@ -0,0 +1,28 @@
Copyright 2010 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

8
third_party/python/MarkupSafe/MANIFEST.in vendored Normal file
View File

@ -0,0 +1,8 @@
include CHANGES.rst
include LICENSE.rst
include README.rst
include tox.ini
graft docs
prune docs/_build
graft tests
global-exclude *.py[co]

101
third_party/python/MarkupSafe/PKG-INFO vendored Normal file
View File

@ -0,0 +1,101 @@
Metadata-Version: 1.2
Name: MarkupSafe
Version: 1.1.1
Summary: Safely add untrusted strings to HTML/XML markup.
Home-page: https://palletsprojects.com/p/markupsafe/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: The Pallets Team
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/markupsafe
Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
Description: MarkupSafe
==========
MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Examples
--------
.. code-block:: pycon
>>> from markupsafe import Markup, escape
>>> # escape replaces special characters and wraps in Markup
>>> escape('<script>alert(document.cookie);</script>')
Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> # wrap in Markup to mark text "safe" and prevent escaping
>>> Markup('<strong>Hello</strong>')
Markup('<strong>Hello</strong>')
>>> escape(Markup('<strong>Hello</strong>'))
Markup('<strong>Hello</strong>')
>>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
>>> # methods and operators escape their arguments
>>> template = Markup("Hello <em>%s</em>")
>>> template % '"World"'
Markup('Hello <em>&#34;World&#34;</em>')
Donate
------
The Pallets organization develops and supports MarkupSafe and other
libraries that use it. In order to grow the community of contributors
and users, and allow the maintainers to devote more time to the
projects, `please donate today`_.
.. _please donate today: https://palletsprojects.com/donate
Links
-----
* Website: https://palletsprojects.com/p/markupsafe/
* Documentation: https://markupsafe.palletsprojects.com/
* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
* Releases: https://pypi.org/project/MarkupSafe/
* Code: https://github.com/pallets/markupsafe
* Issue tracker: https://github.com/pallets/markupsafe/issues
* Test status:
* Linux, Mac: https://travis-ci.org/pallets/markupsafe
* Windows: https://ci.appveyor.com/project/pallets/markupsafe
* Test coverage: https://codecov.io/gh/pallets/markupsafe
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*

69
third_party/python/MarkupSafe/README.rst vendored Normal file
View File

@ -0,0 +1,69 @@
MarkupSafe
==========
MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Examples
--------
.. code-block:: pycon
>>> from markupsafe import Markup, escape
>>> # escape replaces special characters and wraps in Markup
>>> escape('<script>alert(document.cookie);</script>')
Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> # wrap in Markup to mark text "safe" and prevent escaping
>>> Markup('<strong>Hello</strong>')
Markup('<strong>Hello</strong>')
>>> escape(Markup('<strong>Hello</strong>'))
Markup('<strong>Hello</strong>')
>>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
>>> # methods and operators escape their arguments
>>> template = Markup("Hello <em>%s</em>")
>>> template % '"World"'
Markup('Hello <em>&#34;World&#34;</em>')
Donate
------
The Pallets organization develops and supports MarkupSafe and other
libraries that use it. In order to grow the community of contributors
and users, and allow the maintainers to devote more time to the
projects, `please donate today`_.
.. _please donate today: https://palletsprojects.com/donate
Links
-----
* Website: https://palletsprojects.com/p/markupsafe/
* Documentation: https://markupsafe.palletsprojects.com/
* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
* Releases: https://pypi.org/project/MarkupSafe/
* Code: https://github.com/pallets/markupsafe
* Issue tracker: https://github.com/pallets/markupsafe/issues
* Test status:
* Linux, Mac: https://travis-ci.org/pallets/markupsafe
* Windows: https://ci.appveyor.com/project/pallets/markupsafe
* Test coverage: https://codecov.io/gh/pallets/markupsafe

19
third_party/python/MarkupSafe/docs/Makefile vendored Normal file
View File

@ -0,0 +1,19 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

4
third_party/python/MarkupSafe/docs/changes.rst vendored Normal file
View File

@ -0,0 +1,4 @@
Changes
=======
.. include:: ../CHANGES.rst

42
third_party/python/MarkupSafe/docs/conf.py vendored Normal file
View File

@ -0,0 +1,42 @@
from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink
# Project --------------------------------------------------------------
project = "MarkupSafe"
copyright = "2010 Pallets Team"
author = "Pallets Team"
release, version = get_version("MarkupSafe")
# General --------------------------------------------------------------
master_doc = "index"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "pallets_sphinx_themes"]
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
# HTML -----------------------------------------------------------------
html_theme = "flask"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
"project_links": [
ProjectLink("Donate to Pallets", "https://palletsprojects.com/donate"),
ProjectLink("Website", "https://palletsprojects.com/p/markupsafe/"),
ProjectLink("PyPI releases", "https://pypi.org/project/MarkupSafe/"),
ProjectLink("Source Code", "https://github.com/pallets/markupsafe/"),
ProjectLink("Issue Tracker", "https://github.com/pallets/markupsafe/issues/"),
]
}
html_sidebars = {
"index": ["project.html", "localtoc.html", "searchbox.html"],
"**": ["localtoc.html", "relations.html", "searchbox.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
html_title = "MarkupSafe Documentation ({})".format(version)
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [
(master_doc, "MarkupSafe-{}.tex".format(version), html_title, author, "manual")
]

21
third_party/python/MarkupSafe/docs/escaping.rst vendored Normal file
View File

@ -0,0 +1,21 @@
.. module:: markupsafe
Working With Safe Text
======================
.. autofunction:: escape
.. autoclass:: Markup
:members: escape, unescape, striptags
Optional Values
---------------
.. autofunction:: escape_silent
Convert an Object to a String
-----------------------------
.. autofunction:: soft_unicode

77
third_party/python/MarkupSafe/docs/formatting.rst vendored Normal file
View File

@ -0,0 +1,77 @@
.. currentmodule:: markupsafe
String Formatting
=================
The :class:`Markup` class can be used as a format string. Objects
formatted into a markup string will be escaped first.
Format Method
-------------
The ``format`` method extends the standard :meth:`str.format` behavior
to use an ``__html_format__`` method.
#. If an object has an ``__html_format__`` method, it is called as a
replacement for the ``__format__`` method. It is passed the format
specifier, if one is given. The method must return a string or
:class:`Markup` instance.
#. If an object has an ``__html__`` method, it is called. If a format
specifier was passed and the class defined ``__html__`` but not
``__html_format__``, a ``ValueError`` is raised.
#. Otherwise Python's default format behavior is used and the result
is escaped.
For example, to implement a ``User`` that wraps its ``name`` in a
``span`` tag, and adds a link when using the ``'link'`` format
specifier:
.. code-block:: python
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup(
'<a href="/user/{}">{}</a>'
).format(self.id, self.__html__())
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup(
'<span class="user">{0}</span>'
).format(self.name)
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> escape(user)
Markup('<span class="user">&lt;script&gt;</span>')
>>> Markup('<p>User: {user:link}').format(user=user)
Markup('<p>User: <a href="/user/3"><span class="user">&lt;script&gt;</span></a>
See Python's docs on :ref:`format string syntax <python:formatstrings>`.
printf-style Formatting
-----------------------
Besides escaping, there's no special behavior involved with percent
formatting.
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> Markup('<a href="/user/%d">"%s</a>') % (user.id, user.name)
Markup('<a href="/user/3">&lt;script&gt;</a>')
See Python's docs on :ref:`printf-style formatting <python:old-string-formatting>`.

51
third_party/python/MarkupSafe/docs/html.rst vendored Normal file
View File

@ -0,0 +1,51 @@
.. currentmodule:: markupsafe
HTML Representations
====================
In many frameworks, if a class implements an ``__html__`` method it
will be used to get the object's representation in HTML. MarkupSafe's
:func:`escape` function and :class:`Markup` class understand and
implement this method. If an object has an ``__html__`` method it will
be called rather than converting the object to a string, and the result
will be assumed safe and not escaped.
For example, an ``Image`` class might automatically generate an
``<img>`` tag:
.. code-block:: python
class Image:
def __init__(self, url):
self.url = url
def __html__(self):
return '<img src="%s">' % self.url
.. code-block:: pycon
>>> img = Image('/static/logo.png')
>>> Markup(img)
Markup('<img src="/static/logo.png">')
Since this bypasses escaping, you need to be careful about using
user-provided data in the output. For example, a user's display name
should still be escaped:
.. code-block:: python
class User:
def __init__(self, id, name):
self.id = id
self.name = name
def __html__(self):
return '<a href="/user/{}">{}</a>'.format(
self.id, escape(self.name)
)
.. code-block:: pycon
>>> user = User(3, '<script>')
>>> escape(user)
Markup('<a href="/users/3">&lt;script&gt;</a>')

53
third_party/python/MarkupSafe/docs/index.rst vendored Normal file
View File

@ -0,0 +1,53 @@
.. currentmodule:: markupsafe
MarkupSafe
==========
MarkupSafe escapes characters so text is safe to use in HTML and XML.
Characters that have special meanings are replaced so that they display
as the actual characters. This mitigates injection attacks, meaning
untrusted user input can safely be displayed on a page.
The :func:`escape` function escapes text and returns a :class:`Markup`
object. The object won't be escaped anymore, but any text that is used
with it will be, ensuring that the result remains safe to use in HTML.
>>> from markupsafe import escape
>>> hello = escape('<em>Hello</em>')
>>> hello
Markup('&lt;em&gt;Hello&lt;/em&gt;')
>>> escape(hello)
Markup('&lt;em&gt;Hello&lt;/em&gt;')
>>> hello + ' <strong>World</strong>'
Markup('&lt;em&gt;Hello&lt;/em&gt; &lt;strong&gt;World&lt;/strong&gt;')
.. note::
The docs assume you're using Python 3. The terms "text" and "string"
refer to the :class:`str` class. In Python 2, this would be the
``unicode`` class instead.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Table of Contents
-----------------
.. toctree::
:maxdepth: 2
escaping
html
formatting
license
changes

4
third_party/python/MarkupSafe/docs/license.rst vendored Normal file
View File

@ -0,0 +1,4 @@
License
=======
.. include:: ../LICENSE.rst

35
third_party/python/MarkupSafe/docs/make.bat vendored Normal file
View File

@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

2
third_party/python/MarkupSafe/docs/requirements.txt vendored Normal file
View File

@ -0,0 +1,2 @@
Sphinx~=1.8.0
Pallets-Sphinx-Themes~=1.1.0

27
third_party/python/MarkupSafe/setup.cfg vendored Normal file
View File

@ -0,0 +1,27 @@
[metadata]
license_file = LICENSE.rst
[tool:pytest]
testpaths = tests
[coverage:run]
branch = True
source =
markupsafe
[coverage:paths]
source =
src/markupsafe
.tox/*/lib/python*/site-packages/markupsafe
.tox/*/site-packages/markupsafe
[flake8]
select = B, E, F, W, B9
ignore = E203, E501, W503
max-line-length = 80
exclude = src/markupsafe/_compat.py
[egg_info]
tag_build =
tag_date = 0

125
third_party/python/MarkupSafe/setup.py vendored Normal file
View File

@ -0,0 +1,125 @@
from __future__ import print_function
import io
import re
import sys
from distutils.errors import CCompilerError
from distutils.errors import DistutilsExecError
from distutils.errors import DistutilsPlatformError
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
with io.open("README.rst", "rt", encoding="utf8") as f:
readme = f.read()
with io.open("src/markupsafe/__init__.py", "rt", encoding="utf8") as f:
version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
is_jython = "java" in sys.platform
is_pypy = hasattr(sys, "pypy_version_info")
ext_modules = [Extension("markupsafe._speedups", ["src/markupsafe/_speedups.c"])]
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
"""This class allows C extension building to fail."""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
raise BuildFailed()
except ValueError:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(sys.exc_info()[1]): # works with Python 2 and 3
raise BuildFailed()
raise
def run_setup(with_binary):
setup(
name="MarkupSafe",
version=version,
url="https://palletsprojects.com/p/markupsafe/",
project_urls={
"Documentation": "https://markupsafe.palletsprojects.com/",
"Code": "https://github.com/pallets/markupsafe",
"Issue tracker": "https://github.com/pallets/markupsafe/issues",
},
license="BSD-3-Clause",
author="Armin Ronacher",
author_email="armin.ronacher@active-4.com",
maintainer="The Pallets Team",
maintainer_email="contact@palletsprojects.com",
description="Safely add untrusted strings to HTML/XML markup.",
long_description=readme,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: HTML",
],
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
cmdclass={"build_ext": ve_build_ext},
ext_modules=ext_modules if with_binary else [],
)
def show_message(*lines):
print("=" * 74)
for line in lines:
print(line)
print("=" * 74)
if not (is_pypy or is_jython):
try:
run_setup(True)
except BuildFailed:
show_message(
"WARNING: The C extension could not be compiled, speedups"
" are not enabled.",
"Failure information, if any, is above.",
"Retrying the build without the C extension now.",
)
run_setup(False)
show_message(
"WARNING: The C extension could not be compiled, speedups"
" are not enabled.",
"Plain-Python build succeeded.",
)
else:
run_setup(False)
show_message(
"WARNING: C extensions are not supported on this Python"
" platform, speedups are not enabled.",
"Plain-Python build succeeded.",
)

327
third_party/python/MarkupSafe/src/markupsafe/__init__.py vendored Normal file
View File

@ -0,0 +1,327 @@
# -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements an escape function and a Markup string to replace HTML
special characters with safe representations.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
import re
import string
from ._compat import int_types
from ._compat import iteritems
from ._compat import Mapping
from ._compat import PY2
from ._compat import string_types
from ._compat import text_type
from ._compat import unichr
__version__ = "1.1.1"
__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
_entity_re = re.compile(r"&([^& ;]+);")
class Markup(text_type):
"""A string that is ready to be safely inserted into an HTML or XML
document, either because it was escaped or because it was marked
safe.
Passing an object to the constructor converts it to text and wraps
it to mark it safe without escaping. To escape the text, use the
:meth:`escape` class method instead.
>>> Markup('Hello, <em>World</em>!')
Markup('Hello, <em>World</em>!')
>>> Markup(42)
Markup('42')
>>> Markup.escape('Hello, <em>World</em>!')
Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
This implements the ``__html__()`` interface that some frameworks
use. Passing an object that implements ``__html__()`` will wrap the
output of that method, marking it safe.
>>> class Foo:
... def __html__(self):
... return '<a href="/foo">foo</a>'
...
>>> Markup(Foo())
Markup('<a href="/foo">foo</a>')
This is a subclass of the text type (``str`` in Python 3,
``unicode`` in Python 2). It has the same methods as that type, but
all methods escape their arguments and return a ``Markup`` instance.
>>> Markup('<em>%s</em>') % 'foo & bar'
Markup('<em>foo &amp; bar</em>')
>>> Markup('<em>Hello</em> ') + '<foo>'
Markup('<em>Hello</em> &lt;foo&gt;')
"""
__slots__ = ()
def __new__(cls, base=u"", encoding=None, errors="strict"):
if hasattr(base, "__html__"):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, "__html__"):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, "__html__") or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
"""Convert escaped markup back into a text string. This replaces
HTML entities with the characters they represent.
>>> Markup('Main &raquo; <em>About</em>').unescape()
'Main » <em>About</em>'
"""
from ._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ("#x", "#X"):
return unichr(int(name[2:], 16))
elif name.startswith("#"):
return unichr(int(name[1:]))
except ValueError:
pass
# Don't modify unexpected input.
return m.group()
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
""":meth:`unescape` the markup, remove tags, and normalize
whitespace to single spaces.
>>> Markup('Main &raquo;\t<em>About</em>').striptags()
'Main » About'
"""
stripped = u" ".join(_striptags_re.sub("", self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape a string. Calls :func:`escape` and ensures that for
subclasses the correct type is returned.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_simple_escaping_wrapper(name): # noqa: B902
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
_escape_argspec(kwargs, iteritems(kwargs), self.escape)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in (
"__getitem__",
"capitalize",
"title",
"lower",
"upper",
"replace",
"ljust",
"rjust",
"lstrip",
"rstrip",
"center",
"strip",
"translate",
"expandtabs",
"swapcase",
"zfill",
):
locals()[method] = make_simple_escaping_wrapper(method)
def partition(self, sep):
return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
def format(self, *args, **kwargs):
formatter = EscapeFormatter(self.escape)
kwargs = _MagicFormatMapping(args, kwargs)
return self.__class__(formatter.vformat(self, args, kwargs))
def __html_format__(self, format_spec):
if format_spec:
raise ValueError("Unsupported format specification " "for Markup.")
return self
# not in python 3
if hasattr(text_type, "__getslice__"):
__getslice__ = make_simple_escaping_wrapper("__getslice__")
del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == "":
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
if hasattr(text_type, "format"):
class EscapeFormatter(string.Formatter):
def __init__(self, escape):
self.escape = escape
def format_field(self, value, format_spec):
if hasattr(value, "__html_format__"):
rv = value.__html_format__(format_spec)
elif hasattr(value, "__html__"):
if format_spec:
raise ValueError(
"Format specifier {0} given, but {1} does not"
" define __html_format__. A class that defines"
" __html__ must define __html_format__ to work"
" with format specifiers.".format(format_spec, type(value))
)
rv = value.__html__()
else:
# We need to make sure the format spec is unicode here as
# otherwise the wrong callback methods are invoked. For
# instance a byte string would invoke __str__ and
# not __unicode__.
rv = string.Formatter.format_field(self, value, text_type(format_spec))
return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, "__html__") or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
def __getitem__(self, item):
return _MarkupEscapeHelper(self.obj[item], self.escape)
def __str__(self):
return text_type(self.escape(self.obj))
__unicode__ = __str__
def __repr__(self):
return str(self.escape(repr(self.obj)))
def __int__(self):
return int(self.obj)
def __float__(self):
return float(self.obj)
# we have to import it down here as the speedups and native
# modules import the Markup type which is defined above.
try:
from ._speedups import escape, escape_silent, soft_unicode
except ImportError:
from ._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append("soft_str")

33
third_party/python/MarkupSafe/src/markupsafe/_compat.py vendored Normal file
View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""
markupsafe._compat
~~~~~~~~~~~~~~~~~~
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
string_types = (str,)
unichr = chr
int_types = (int,)
def iteritems(x):
return iter(x.items())
from collections.abc import Mapping
else:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
int_types = (int, long)
def iteritems(x):
return x.iteritems()
from collections import Mapping

264
third_party/python/MarkupSafe/src/markupsafe/_constants.py vendored Normal file
View File

@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
HTML_ENTITIES = {
"AElig": 198,
"Aacute": 193,
"Acirc": 194,
"Agrave": 192,
"Alpha": 913,
"Aring": 197,
"Atilde": 195,
"Auml": 196,
"Beta": 914,
"Ccedil": 199,
"Chi": 935,
"Dagger": 8225,
"Delta": 916,
"ETH": 208,
"Eacute": 201,
"Ecirc": 202,
"Egrave": 200,
"Epsilon": 917,
"Eta": 919,
"Euml": 203,
"Gamma": 915,
"Iacute": 205,
"Icirc": 206,
"Igrave": 204,
"Iota": 921,
"Iuml": 207,
"Kappa": 922,
"Lambda": 923,
"Mu": 924,
"Ntilde": 209,
"Nu": 925,
"OElig": 338,
"Oacute": 211,
"Ocirc": 212,
"Ograve": 210,
"Omega": 937,
"Omicron": 927,
"Oslash": 216,
"Otilde": 213,
"Ouml": 214,
"Phi": 934,
"Pi": 928,
"Prime": 8243,
"Psi": 936,
"Rho": 929,
"Scaron": 352,
"Sigma": 931,
"THORN": 222,
"Tau": 932,
"Theta": 920,
"Uacute": 218,
"Ucirc": 219,
"Ugrave": 217,
"Upsilon": 933,
"Uuml": 220,
"Xi": 926,
"Yacute": 221,
"Yuml": 376,
"Zeta": 918,
"aacute": 225,
"acirc": 226,
"acute": 180,
"aelig": 230,
"agrave": 224,
"alefsym": 8501,
"alpha": 945,
"amp": 38,
"and": 8743,
"ang": 8736,
"apos": 39,
"aring": 229,
"asymp": 8776,
"atilde": 227,
"auml": 228,
"bdquo": 8222,
"beta": 946,
"brvbar": 166,
"bull": 8226,
"cap": 8745,
"ccedil": 231,
"cedil": 184,
"cent": 162,
"chi": 967,
"circ": 710,
"clubs": 9827,
"cong": 8773,
"copy": 169,
"crarr": 8629,
"cup": 8746,
"curren": 164,
"dArr": 8659,
"dagger": 8224,
"darr": 8595,
"deg": 176,
"delta": 948,
"diams": 9830,
"divide": 247,
"eacute": 233,
"ecirc": 234,
"egrave": 232,
"empty": 8709,
"emsp": 8195,
"ensp": 8194,
"epsilon": 949,
"equiv": 8801,
"eta": 951,
"eth": 240,
"euml": 235,
"euro": 8364,
"exist": 8707,
"fnof": 402,
"forall": 8704,
"frac12": 189,
"frac14": 188,
"frac34": 190,
"frasl": 8260,
"gamma": 947,
"ge": 8805,
"gt": 62,
"hArr": 8660,
"harr": 8596,
"hearts": 9829,
"hellip": 8230,
"iacute": 237,
"icirc": 238,
"iexcl": 161,
"igrave": 236,
"image": 8465,
"infin": 8734,
"int": 8747,
"iota": 953,
"iquest": 191,
"isin": 8712,
"iuml": 239,
"kappa": 954,
"lArr": 8656,
"lambda": 955,
"lang": 9001,
"laquo": 171,
"larr": 8592,
"lceil": 8968,
"ldquo": 8220,
"le": 8804,
"lfloor": 8970,
"lowast": 8727,
"loz": 9674,
"lrm": 8206,
"lsaquo": 8249,
"lsquo": 8216,
"lt": 60,
"macr": 175,
"mdash": 8212,
"micro": 181,
"middot": 183,
"minus": 8722,
"mu": 956,
"nabla": 8711,
"nbsp": 160,
"ndash": 8211,
"ne": 8800,
"ni": 8715,
"not": 172,
"notin": 8713,
"nsub": 8836,
"ntilde": 241,
"nu": 957,
"oacute": 243,
"ocirc": 244,
"oelig": 339,
"ograve": 242,
"oline": 8254,
"omega": 969,
"omicron": 959,
"oplus": 8853,
"or": 8744,
"ordf": 170,
"ordm": 186,
"oslash": 248,
"otilde": 245,
"otimes": 8855,
"ouml": 246,
"para": 182,
"part": 8706,
"permil": 8240,
"perp": 8869,
"phi": 966,
"pi": 960,
"piv": 982,
"plusmn": 177,
"pound": 163,
"prime": 8242,
"prod": 8719,
"prop": 8733,
"psi": 968,
"quot": 34,
"rArr": 8658,
"radic": 8730,
"rang": 9002,
"raquo": 187,
"rarr": 8594,
"rceil": 8969,
"rdquo": 8221,
"real": 8476,
"reg": 174,
"rfloor": 8971,
"rho": 961,
"rlm": 8207,
"rsaquo": 8250,
"rsquo": 8217,
"sbquo": 8218,
"scaron": 353,
"sdot": 8901,
"sect": 167,
"shy": 173,
"sigma": 963,
"sigmaf": 962,
"sim": 8764,
"spades": 9824,
"sub": 8834,
"sube": 8838,
"sum": 8721,
"sup": 8835,
"sup1": 185,
"sup2": 178,
"sup3": 179,
"supe": 8839,
"szlig": 223,
"tau": 964,
"there4": 8756,
"theta": 952,
"thetasym": 977,
"thinsp": 8201,
"thorn": 254,
"tilde": 732,
"times": 215,
"trade": 8482,
"uArr": 8657,
"uacute": 250,
"uarr": 8593,
"ucirc": 251,
"ugrave": 249,
"uml": 168,
"upsih": 978,
"upsilon": 965,
"uuml": 252,
"weierp": 8472,
"xi": 958,
"yacute": 253,
"yen": 165,
"yuml": 255,
"zeta": 950,
"zwj": 8205,
"zwnj": 8204,
}

69
third_party/python/MarkupSafe/src/markupsafe/_native.py vendored Normal file
View File

@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
Native Python implementation used when the C module is not compiled.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from . import Markup
from ._compat import text_type
def escape(s):
"""Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
the string with HTML-safe sequences. Use this if you need to display
text that might contain such characters in HTML.
If the object has an ``__html__`` method, it is called and the
return value is assumed to already be safe for HTML.
:param s: An object to be converted to a string and escaped.
:return: A :class:`Markup` string with the escaped text.
"""
if hasattr(s, "__html__"):
return Markup(s.__html__())
return Markup(
text_type(s)
.replace("&", "&amp;")
.replace(">", "&gt;")
.replace("<", "&lt;")
.replace("'", "&#39;")
.replace('"', "&#34;")
)
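# Illustrative sketch (not part of the upstream module), following the
# replacement table above:
#
#     escape(u'<a href="x">')  # -> Markup(u'&lt;a href=&#34;x&#34;&gt;')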
def escape_silent(s):
"""Like :func:`escape` but treats ``None`` as the empty string.
Useful with optional values, as otherwise you get the string
``'None'`` when the value is ``None``.
>>> escape(None)
Markup('None')
>>> escape_silent(None)
Markup('')
"""
if s is None:
return Markup()
return escape(s)
def soft_unicode(s):
"""Convert an object to a string if it isn't already. This preserves
a :class:`Markup` string rather than converting it back to a basic
string, so it will still be marked as safe and won't be escaped
again.
>>> value = escape('<User 1>')
>>> value
Markup('&lt;User 1&gt;')
>>> escape(str(value))
Markup('&amp;lt;User 1&amp;gt;')
>>> escape(soft_unicode(value))
Markup('&lt;User 1&gt;')
"""
if not isinstance(s, text_type):
s = text_type(s)
return s

423
third_party/python/MarkupSafe/src/markupsafe/_speedups.c vendored Normal file
View File

@ -0,0 +1,423 @@
/**
* markupsafe._speedups
* ~~~~~~~~~~~~~~~~~~~~
*
* C implementation of escaping for better performance. Used instead of
* the native Python implementation when compiled.
*
* :copyright: 2010 Pallets
* :license: BSD-3-Clause
*/
#include <Python.h>
#if PY_MAJOR_VERSION < 3
#define ESCAPED_CHARS_TABLE_SIZE 63
#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
#endif
static PyObject* markup;
static int
init_constants(void)
{
PyObject *module;
#if PY_MAJOR_VERSION < 3
/* mapping of characters to replace */
escaped_chars_repl['"'] = UNICHR("&#34;");
escaped_chars_repl['\''] = UNICHR("&#39;");
escaped_chars_repl['&'] = UNICHR("&amp;");
escaped_chars_repl['<'] = UNICHR("&lt;");
escaped_chars_repl['>'] = UNICHR("&gt;");
/* lengths of those characters when replaced - 1 */
memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
escaped_chars_delta_len['&'] = 4;
escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
#endif
/* import markup type so that we can mark the return value */
module = PyImport_ImportModule("markupsafe");
if (!module)
return 0;
markup = PyObject_GetAttrString(module, "Markup");
Py_DECREF(module);
return 1;
}
#if PY_MAJOR_VERSION < 3
static PyObject*
escape_unicode(PyUnicodeObject *in)
{
PyUnicodeObject *out;
Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
Py_UNICODE *next_escp;
Py_UNICODE *outp;
Py_ssize_t delta=0, erepl=0, delta_len=0;
/* First we need to figure out how long the escaped string will be */
while (*(inp) || inp < inp_end) {
if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
delta += escaped_chars_delta_len[*inp];
erepl += !!escaped_chars_delta_len[*inp];
}
++inp;
}
/* Do we need to escape anything at all? */
if (!erepl) {
Py_INCREF(in);
return (PyObject*)in;
}
out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
if (!out)
return NULL;
outp = PyUnicode_AS_UNICODE(out);
inp = PyUnicode_AS_UNICODE(in);
while (erepl-- > 0) {
/* look for the next substitution */
next_escp = inp;
while (next_escp < inp_end) {
if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
(delta_len = escaped_chars_delta_len[*next_escp])) {
++delta_len;
break;
}
++next_escp;
}
if (next_escp > inp) {
/* copy unescaped chars between inp and next_escp */
Py_UNICODE_COPY(outp, inp, next_escp-inp);
outp += next_escp - inp;
}
/* escape 'next_escp' */
Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
outp += delta_len;
inp = next_escp + 1;
}
if (inp < inp_end)
Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
return (PyObject*)out;
}
#else /* PY_MAJOR_VERSION < 3 */
#define GET_DELTA(inp, inp_end, delta) \
while (inp < inp_end) { \
switch (*inp++) { \
case '"': \
case '\'': \
case '&': \
delta += 4; \
break; \
case '<': \
case '>': \
delta += 3; \
break; \
} \
}
#define DO_ESCAPE(inp, inp_end, outp) \
{ \
Py_ssize_t ncopy = 0; \
while (inp < inp_end) { \
switch (*inp) { \
case '"': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
*outp++ = '&'; \
*outp++ = '#'; \
*outp++ = '3'; \
*outp++ = '4'; \
*outp++ = ';'; \
break; \
case '\'': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
*outp++ = '&'; \
*outp++ = '#'; \
*outp++ = '3'; \
*outp++ = '9'; \
*outp++ = ';'; \
break; \
case '&': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
*outp++ = '&'; \
*outp++ = 'a'; \
*outp++ = 'm'; \
*outp++ = 'p'; \
*outp++ = ';'; \
break; \
case '<': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
*outp++ = '&'; \
*outp++ = 'l'; \
*outp++ = 't'; \
*outp++ = ';'; \
break; \
case '>': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
*outp++ = '&'; \
*outp++ = 'g'; \
*outp++ = 't'; \
*outp++ = ';'; \
break; \
default: \
ncopy++; \
} \
inp++; \
} \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
}
static PyObject*
escape_unicode_kind1(PyUnicodeObject *in)
{
Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
Py_UCS1 *outp;
PyObject *out;
Py_ssize_t delta = 0;
GET_DELTA(inp, inp_end, delta);
if (!delta) {
Py_INCREF(in);
return (PyObject*)in;
}
out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
PyUnicode_IS_ASCII(in) ? 127 : 255);
if (!out)
return NULL;
inp = PyUnicode_1BYTE_DATA(in);
outp = PyUnicode_1BYTE_DATA(out);
DO_ESCAPE(inp, inp_end, outp);
return out;
}
static PyObject*
escape_unicode_kind2(PyUnicodeObject *in)
{
Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
Py_UCS2 *outp;
PyObject *out;
Py_ssize_t delta = 0;
GET_DELTA(inp, inp_end, delta);
if (!delta) {
Py_INCREF(in);
return (PyObject*)in;
}
out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
if (!out)
return NULL;
inp = PyUnicode_2BYTE_DATA(in);
outp = PyUnicode_2BYTE_DATA(out);
DO_ESCAPE(inp, inp_end, outp);
return out;
}
static PyObject*
escape_unicode_kind4(PyUnicodeObject *in)
{
Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
Py_UCS4 *outp;
PyObject *out;
Py_ssize_t delta = 0;
GET_DELTA(inp, inp_end, delta);
if (!delta) {
Py_INCREF(in);
return (PyObject*)in;
}
out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
if (!out)
return NULL;
inp = PyUnicode_4BYTE_DATA(in);
outp = PyUnicode_4BYTE_DATA(out);
DO_ESCAPE(inp, inp_end, outp);
return out;
}
static PyObject*
escape_unicode(PyUnicodeObject *in)
{
if (PyUnicode_READY(in))
return NULL;
switch (PyUnicode_KIND(in)) {
case PyUnicode_1BYTE_KIND:
return escape_unicode_kind1(in);
case PyUnicode_2BYTE_KIND:
return escape_unicode_kind2(in);
case PyUnicode_4BYTE_KIND:
return escape_unicode_kind4(in);
}
assert(0); /* shouldn't happen */
return NULL;
}
#endif /* PY_MAJOR_VERSION < 3 */
static PyObject*
escape(PyObject *self, PyObject *text)
{
static PyObject *id_html;
PyObject *s = NULL, *rv = NULL, *html;
if (id_html == NULL) {
#if PY_MAJOR_VERSION < 3
id_html = PyString_InternFromString("__html__");
#else
id_html = PyUnicode_InternFromString("__html__");
#endif
if (id_html == NULL) {
return NULL;
}
}
/* we don't have to escape integers, bools or floats */
if (PyLong_CheckExact(text) ||
#if PY_MAJOR_VERSION < 3
PyInt_CheckExact(text) ||
#endif
PyFloat_CheckExact(text) || PyBool_Check(text) ||
text == Py_None)
return PyObject_CallFunctionObjArgs(markup, text, NULL);
/* if the object has an __html__ method that performs the escaping */
html = PyObject_GetAttr(text, id_html);
if (html) {
s = PyObject_CallObject(html, NULL);
Py_DECREF(html);
if (s == NULL) {
return NULL;
}
/* Convert to Markup object */
rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
Py_DECREF(s);
return rv;
}
/* otherwise make the object unicode if it isn't, then escape */
PyErr_Clear();
if (!PyUnicode_Check(text)) {
#if PY_MAJOR_VERSION < 3
PyObject *unicode = PyObject_Unicode(text);
#else
PyObject *unicode = PyObject_Str(text);
#endif
if (!unicode)
return NULL;
s = escape_unicode((PyUnicodeObject*)unicode);
Py_DECREF(unicode);
}
else
s = escape_unicode((PyUnicodeObject*)text);
/* convert the unicode string into a markup object. */
rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
Py_DECREF(s);
return rv;
}
static PyObject*
escape_silent(PyObject *self, PyObject *text)
{
if (text != Py_None)
return escape(self, text);
return PyObject_CallFunctionObjArgs(markup, NULL);
}
static PyObject*
soft_unicode(PyObject *self, PyObject *s)
{
if (!PyUnicode_Check(s))
#if PY_MAJOR_VERSION < 3
return PyObject_Unicode(s);
#else
return PyObject_Str(s);
#endif
Py_INCREF(s);
return s;
}
static PyMethodDef module_methods[] = {
{"escape", (PyCFunction)escape, METH_O,
"escape(s) -> markup\n\n"
"Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
"sequences. Use this if you need to display text that might contain\n"
"such characters in HTML. Marks return value as markup string."},
{"escape_silent", (PyCFunction)escape_silent, METH_O,
"escape_silent(s) -> markup\n\n"
"Like escape but converts None to an empty string."},
{"soft_unicode", (PyCFunction)soft_unicode, METH_O,
"soft_unicode(object) -> string\n\n"
"Make a string unicode if it isn't already. That way a markup\n"
"string is not converted back to unicode."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
#if PY_MAJOR_VERSION < 3
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
PyMODINIT_FUNC
init_speedups(void)
{
if (!init_constants())
return;
Py_InitModule3("markupsafe._speedups", module_methods, "");
}
#else /* Python 3.x module initialization */
static struct PyModuleDef module_definition = {
PyModuleDef_HEAD_INIT,
"markupsafe._speedups",
NULL,
-1,
module_methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC
PyInit__speedups(void)
{
if (!init_constants())
return NULL;
return PyModule_Create(&module_definition);
}
#endif
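The escape() implementation above is the C fast path for markupsafe's public API: numbers, bools and None are passed straight to Markup, objects exposing __html__ are trusted as already escaped, and everything else is coerced to text and escaped. A minimal sketch of that behavior from Python (illustrative values; the pure-Python fallback in markupsafe behaves identically):

# Sketch of the API the _speedups accelerator implements.
from markupsafe import escape, escape_silent, soft_unicode

print(escape('<em>"hi" & bye</em>'))  # Markup('&lt;em&gt;&#34;hi&#34; &amp; bye&lt;/em&gt;')
print(escape_silent(None))            # Markup('') -- None maps to the empty markup string
print(soft_unicode(42))               # '42' -- non-strings are coerced, Markup is left alone

class Widget(object):
    def __html__(self):
        return '<b>already safe</b>'

print(escape(Widget()))               # the __html__ result is wrapped in Markup, not re-escaped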

third_party/python/MarkupSafe/tox.ini (vendored, new file)
@@ -0,0 +1,44 @@
[tox]
envlist =
py{37,36,35,34,27,py3,py}
stylecheck
docs-html
coverage-report
skip_missing_interpreters = true
[testenv]
setenv =
COVERAGE_FILE = .coverage.{envname}
deps =
pytest-cov
commands = pytest --tb=short --cov --cov-report= {posargs}
[testenv:stylecheck]
deps = pre-commit
skip_install = true
commands = pre-commit run --all-files --show-diff-on-failure
[testenv:docs-html]
deps = -r docs/requirements.txt
commands = sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html
[testenv:coverage-report]
setenv =
COVERAGE_FILE = .coverage
deps = coverage
skip_install = true
commands =
coverage combine
coverage html
coverage report
[testenv:codecov]
passenv = CI TRAVIS TRAVIS_*
setenv =
COVERAGE_FILE = .coverage
deps = codecov
skip_install = true
commands =
coverage combine
codecov
coverage report
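The envlist above defines the full test matrix; individual environments can also be run in isolation, e.g. "tox -e py37" for the CPython 3.7 suite or "tox -e docs-html" for the Sphinx build.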

third_party/python/appdirs/appdirs.py (vendored, new file)
@@ -0,0 +1,608 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))

third_party/python/diskcache/diskcache/__init__.py (vendored, new file)
@@ -0,0 +1,51 @@
"""
DiskCache API Reference
=======================
The :doc:`tutorial` provides a helpful walkthrough of most methods.
"""
from .core import Cache, Disk, EmptyDirWarning, JSONDisk, UnknownFileWarning, Timeout
from .core import DEFAULT_SETTINGS, ENOVAL, EVICTION_POLICY, UNKNOWN
from .fanout import FanoutCache
from .persistent import Deque, Index
from .recipes import Averager, BoundedSemaphore, Lock, RLock
from .recipes import barrier, memoize_stampede, throttle
__all__ = [
'Averager',
'BoundedSemaphore',
'Cache',
'DEFAULT_SETTINGS',
'Deque',
'Disk',
'ENOVAL',
'EVICTION_POLICY',
'EmptyDirWarning',
'FanoutCache',
'Index',
'JSONDisk',
'Lock',
'RLock',
'Timeout',
'UNKNOWN',
'UnknownFileWarning',
'barrier',
'memoize_stampede',
'throttle',
]
try:
from .djangocache import DjangoCache # pylint: disable=wrong-import-position
__all__.append('DjangoCache')
except Exception: # pylint: disable=broad-except
# Django not installed or not setup so ignore.
pass
__title__ = 'diskcache'
__version__ = '4.1.0'
__build__ = 0x040100
__author__ = 'Grant Jenks'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016-2018 Grant Jenks'
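A minimal sketch of the core API exported here (the cache directory is illustrative):

from diskcache import Cache

with Cache('/tmp/demo-cache') as cache:  # directory is created on demand
    cache.set('answer', 42, expire=60)   # entry expires after 60 seconds
    assert cache.get('answer') == 42
    assert 'answer' in cache
    cache.delete('answer')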

third_party/python/diskcache/diskcache/cli.py (vendored, new file)
@@ -0,0 +1 @@
"Command line interface to disk cache."

third_party/python/diskcache/diskcache/core.py (vendored, new file, 2,481 lines; diff not shown because of its size)

third_party/python/diskcache/diskcache/djangocache.py (vendored, new file)
@@ -0,0 +1,433 @@
"Django-compatible disk and file backed cache."
from functools import wraps
from django.core.cache.backends.base import BaseCache
try:
from django.core.cache.backends.base import DEFAULT_TIMEOUT
except ImportError:
# For older versions of Django simply use 300 seconds.
DEFAULT_TIMEOUT = 300
from .core import ENOVAL, args_to_key, full_name
from .fanout import FanoutCache
class DjangoCache(BaseCache):
"Django-compatible disk and file backed cache."
def __init__(self, directory, params):
"""Initialize DjangoCache instance.
:param str directory: cache directory
:param dict params: cache parameters
"""
super(DjangoCache, self).__init__(params)
shards = params.get('SHARDS', 8)
timeout = params.get('DATABASE_TIMEOUT', 0.010)
options = params.get('OPTIONS', {})
self._cache = FanoutCache(directory, shards, timeout, **options)
@property
def directory(self):
"""Cache directory."""
return self._cache.directory
def cache(self, name):
"""Return Cache with given `name` in subdirectory.
:param str name: subdirectory name for Cache
:return: Cache with given name
"""
return self._cache.cache(name)
def deque(self, name):
"""Return Deque with given `name` in subdirectory.
:param str name: subdirectory name for Deque
:return: Deque with given name
"""
return self._cache.deque(name)
def index(self, name):
"""Return Index with given `name` in subdirectory.
:param str name: subdirectory name for Index
:return: Index with given name
"""
return self._cache.index(name)
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
read=False, tag=None, retry=True):
"""Set a value in the cache if the key does not already exist. If
timeout is given, that timeout will be used for the key; otherwise the
default cache timeout will be used.
Return True if the value was stored, False otherwise.
:param key: key for item
:param value: value for item
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool read: read value as bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout occurs (default True)
:return: True if item was added
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
timeout = self.get_backend_timeout(timeout=timeout)
return self._cache.add(key, value, timeout, read, tag, retry)
def get(self, key, default=None, version=None, read=False,
expire_time=False, tag=False, retry=False):
"""Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
:param key: key for item
:param default: return value if key is missing (default None)
:param int version: key version number (default None, cache parameter)
:param bool read: if True, return file handle to value
(default False)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout occurs (default False)
:return: value for item if key is found else default
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
return self._cache.get(key, default, read, expire_time, tag, retry)
def read(self, key, version=None):
"""Return file handle corresponding to `key` from Cache.
:param key: Python key to retrieve
:param int version: key version number (default None, cache parameter)
:return: file open for reading in binary mode
:raises KeyError: if key is not found
"""
key = self.make_key(key, version=version)
return self._cache.read(key)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
read=False, tag=None, retry=True):
"""Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
:param key: key for item
:param value: value for item
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool read: read value as bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout occurs (default True)
:return: True if item was set
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
timeout = self.get_backend_timeout(timeout=timeout)
return self._cache.set(key, value, timeout, read, tag, retry)
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True):
"""Touch a key in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
:param key: key for item
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool retry: retry if database timeout occurs (default True)
:return: True if key was touched
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
timeout = self.get_backend_timeout(timeout=timeout)
return self._cache.touch(key, timeout, retry)
def pop(self, key, default=None, version=None, expire_time=False,
tag=False, retry=True):
"""Remove corresponding item for `key` from cache and return value.
If `key` is missing, return `default`.
Operation is atomic. Concurrent operations will be serialized.
:param key: key for item
:param default: return value if key is missing (default None)
:param int version: key version number (default None, cache parameter)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout occurs (default True)
:return: value for item if key is found else default
"""
key = self.make_key(key, version=version)
return self._cache.pop(key, default, expire_time, tag, retry)
def delete(self, key, version=None, retry=True):
"""Delete a key from the cache, failing silently.
:param key: key for item
:param int version: key version number (default None, cache parameter)
:param bool retry: retry if database timeout occurs (default True)
:return: True if item was deleted
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
return self._cache.delete(key, retry)
def incr(self, key, delta=1, version=None, default=None, retry=True):
"""Increment value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent increment operations will be
counted individually.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
:param key: key for item
:param int delta: amount to increment (default 1)
:param int version: key version number (default None, cache parameter)
:param int default: value if key is missing (default None)
:param bool retry: retry if database timeout occurs (default True)
:return: new value for item on success else None
:raises ValueError: if key is not found and default is None
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
try:
return self._cache.incr(key, delta, default, retry)
except KeyError:
raise ValueError("Key '%s' not found" % key)
def decr(self, key, delta=1, version=None, default=None, retry=True):
"""Decrement value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent decrement operations will be
counted individually.
Unlike Memcached, negative values are supported. Value may be
decremented below zero.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
:param key: key for item
:param int delta: amount to decrement (default 1)
:param int version: key version number (default None, cache parameter)
:param int default: value if key is missing (default None)
:param bool retry: retry if database timeout occurs (default True)
:return: new value for item on success else None
:raises ValueError: if key is not found and default is None
"""
# pylint: disable=arguments-differ
return self.incr(key, -delta, version, default, retry)
def has_key(self, key, version=None):
"""Returns True if the key is in the cache and has not expired.
:param key: key for item
:param int version: key version number (default None, cache parameter)
:return: True if key is found
"""
key = self.make_key(key, version=version)
return key in self._cache
def expire(self):
"""Remove expired items from cache.
:return: count of items removed
"""
return self._cache.expire()
def stats(self, enable=True, reset=False):
"""Return cache statistics hits and misses.
:param bool enable: enable collecting statistics (default True)
:param bool reset: reset hits and misses to 0 (default False)
:return: (hits, misses)
"""
return self._cache.stats(enable=enable, reset=reset)
def create_tag_index(self):
"""Create tag index on cache database.
Better to initialize cache with `tag_index=True` than use this.
:raises Timeout: if database timeout occurs
"""
self._cache.create_tag_index()
def drop_tag_index(self):
"""Drop tag index on cache database.
:raises Timeout: if database timeout occurs
"""
self._cache.drop_tag_index()
def evict(self, tag):
"""Remove items with matching `tag` from cache.
:param str tag: tag identifying items
:return: count of items removed
"""
return self._cache.evict(tag)
def cull(self):
"""Cull items from cache until volume is less than size limit.
:return: count of items removed
"""
return self._cache.cull()
def clear(self):
"Remove *all* values from the cache at once."
return self._cache.clear()
def close(self, **kwargs):
"Close the cache connection."
# pylint: disable=unused-argument
self._cache.close()
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""Return seconds to expiration.
:param float timeout: seconds until the item expires
(default 300 seconds)
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else timeout
def memoize(self, name=None, timeout=DEFAULT_TIMEOUT, version=None,
typed=False, tag=None):
"""Memoizing cache decorator.
Decorator to wrap callable with memoizing function using cache.
Repeated calls with the same arguments will lookup result in cache and
avoid function evaluation.
If name is set to None (default), the callable name will be determined
automatically.
When timeout is set to zero, function results will not be set in the
cache. Cache lookups still occur, however. Read
:doc:`case-study-landing-page-caching` for example usage.
If typed is set to True, function arguments of different types will be
cached separately. For example, f(3) and f(3.0) will be treated as
distinct calls with distinct results.
The original underlying function is accessible through the __wrapped__
attribute. This is useful for introspection, for bypassing the cache,
or for rewrapping the function with a different cache.
An additional `__cache_key__` attribute can be used to generate the
cache key used for the given arguments.
Remember to call memoize when decorating a callable. If you forget,
then a TypeError will occur.
:param str name: name given for callable (default None, automatic)
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool typed: cache different types separately (default False)
:param str tag: text to associate with arguments (default None)
:return: callable decorator
"""
# Caution: Nearly identical code exists in Cache.memoize
if callable(name):
raise TypeError('name cannot be callable')
def decorator(func):
"Decorator created by memoize() for callable `func`."
base = (full_name(func),) if name is None else (name,)
@wraps(func)
def wrapper(*args, **kwargs):
"Wrapper for callable to cache arguments and return values."
key = wrapper.__cache_key__(*args, **kwargs)
result = self.get(key, ENOVAL, version, retry=True)
if result is ENOVAL:
result = func(*args, **kwargs)
valid_timeout = (
timeout is None
or timeout == DEFAULT_TIMEOUT
or timeout > 0
)
if valid_timeout:
self.set(
key, result, timeout, version, tag=tag, retry=True,
)
return result
def __cache_key__(*args, **kwargs):
"Make key for cache given function arguments."
return args_to_key(base, args, kwargs, typed)
wrapper.__cache_key__ = __cache_key__
return wrapper
return decorator
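DjangoCache is wired up through Django's normal CACHES setting; SHARDS, DATABASE_TIMEOUT and OPTIONS are the parameters read by __init__ above, and LOCATION is passed through as the cache directory. A sketch for a settings module (the directory path is illustrative):

CACHES = {
    'default': {
        'BACKEND': 'diskcache.DjangoCache',
        'LOCATION': '/var/tmp/django-disk-cache',  # cache directory
        'TIMEOUT': 300,                            # default entry timeout (seconds)
        'SHARDS': 8,
        'DATABASE_TIMEOUT': 0.010,                 # SQLite timeout (seconds)
        'OPTIONS': {'size_limit': 2 ** 30},        # 1 GiB, passed through to FanoutCache
    },
}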

third_party/python/diskcache/diskcache/fanout.py (vendored, new file)
@@ -0,0 +1,677 @@
"Fanout cache automatically shards keys and values."
import itertools as it
import operator
import os.path as op
import sqlite3
import sys
import tempfile
import time
from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout
from .persistent import Deque, Index
############################################################################
# BEGIN Python 2/3 Shims
############################################################################
if sys.hexversion >= 0x03000000:
from functools import reduce
############################################################################
# END Python 2/3 Shims
############################################################################
class FanoutCache(object):
"Cache that shards keys and values."
def __init__(self, directory=None, shards=8, timeout=0.010, disk=Disk,
**settings):
"""Initialize cache instance.
:param str directory: cache directory
:param int shards: number of shards to distribute writes
:param float timeout: SQLite connection timeout
:param disk: `Disk` instance for serialization
:param settings: any of `DEFAULT_SETTINGS`
"""
if directory is None:
directory = tempfile.mkdtemp(prefix='diskcache-')
directory = op.expanduser(directory)
directory = op.expandvars(directory)
default_size_limit = DEFAULT_SETTINGS['size_limit']
size_limit = settings.pop('size_limit', default_size_limit) / shards
self._count = shards
self._directory = directory
self._shards = tuple(
Cache(
directory=op.join(directory, '%03d' % num),
timeout=timeout,
disk=disk,
size_limit=size_limit,
**settings
)
for num in range(shards)
)
self._hash = self._shards[0].disk.hash
self._caches = {}
self._deques = {}
self._indexes = {}
@property
def directory(self):
"""Cache directory."""
return self._directory
def __getattr__(self, name):
return getattr(self._shards[0], name)
def set(self, key, value, expire=None, read=False, tag=None, retry=False):
"""Set `key` and `value` item in cache.
When `read` is `True`, `value` should be a file-like object opened
for reading in binary mode.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param value: value for item
:param float expire: seconds until the key expires
(default None, no expiry)
:param bool read: read value as raw bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout occurs (default False)
:return: True if item was set
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.set(key, value, expire, read, tag, retry)
except Timeout:
return False
def __setitem__(self, key, value):
"""Set `key` and `value` item in cache.
Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
:param key: key for item
:param value: value for item
"""
index = self._hash(key) % self._count
shard = self._shards[index]
shard[key] = value
def touch(self, key, expire=None, retry=False):
"""Touch `key` in cache and update `expire` time.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param float expire: seconds until the key expires
(default None, no expiry)
:param bool retry: retry if database timeout occurs (default False)
:return: True if key was touched
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.touch(key, expire, retry)
except Timeout:
return False
def add(self, key, value, expire=None, read=False, tag=None, retry=False):
"""Add `key` and `value` item to cache.
Similar to `set`, but only add to cache if key not present.
This operation is atomic. Only one concurrent add operation for given
key from separate threads or processes will succeed.
When `read` is `True`, `value` should be a file-like object opened
for reading in binary mode.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param value: value for item
:param float expire: seconds until the key expires
(default None, no expiry)
:param bool read: read value as bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout occurs (default False)
:return: True if item was added
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.add(key, value, expire, read, tag, retry)
except Timeout:
return False
def incr(self, key, delta=1, default=0, retry=False):
"""Increment value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent increment operations will be
counted individually.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param int delta: amount to increment (default 1)
:param int default: value if key is missing (default 0)
:param bool retry: retry if database timeout occurs (default False)
:return: new value for item on success else None
:raises KeyError: if key is not found and default is None
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.incr(key, delta, default, retry)
except Timeout:
return None
def decr(self, key, delta=1, default=0, retry=False):
"""Decrement value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent decrement operations will be
counted individually.
Unlike Memcached, negative values are supported. Value may be
decremented below zero.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param int delta: amount to decrement (default 1)
:param int default: value if key is missing (default 0)
:param bool retry: retry if database timeout occurs (default False)
:return: new value for item on success else None
:raises KeyError: if key is not found and default is None
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.decr(key, delta, default, retry)
except Timeout:
return None
def get(self, key, default=None, read=False, expire_time=False, tag=False,
retry=False):
"""Retrieve value from cache. If `key` is missing, return `default`.
If database timeout occurs then returns `default` unless `retry` is set
to `True` (default `False`).
:param key: key for item
:param default: return value if key is missing (default None)
:param bool read: if True, return file handle to value
(default False)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout occurs (default False)
:return: value for item if key is found else default
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.get(key, default, read, expire_time, tag, retry)
except (Timeout, sqlite3.OperationalError):
return default
def __getitem__(self, key):
"""Return corresponding value for `key` from cache.
Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
:param key: key for item
:return: value for item
:raises KeyError: if key is not found
"""
index = self._hash(key) % self._count
shard = self._shards[index]
return shard[key]
def read(self, key):
"""Return file handle corresponding to `key` from cache.
:param key: key for item
:return: file open for reading in binary mode
:raises KeyError: if key is not found
"""
handle = self.get(key, default=ENOVAL, read=True, retry=True)
if handle is ENOVAL:
raise KeyError(key)
return handle
def __contains__(self, key):
"""Return `True` if `key` matching item is found in cache.
:param key: key for item
:return: True if key is found
"""
index = self._hash(key) % self._count
shard = self._shards[index]
return key in shard
def pop(self, key, default=None, expire_time=False, tag=False, retry=False):
"""Remove corresponding item for `key` from cache and return value.
If `key` is missing, return `default`.
Operation is atomic. Concurrent operations will be serialized.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param default: return value if key is missing (default None)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout occurs (default False)
:return: value for item if key is found else default
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.pop(key, default, expire_time, tag, retry)
except Timeout:
return default
def delete(self, key, retry=False):
"""Delete corresponding item for `key` from cache.
Missing keys are ignored.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param key: key for item
:param bool retry: retry if database timeout occurs (default False)
:return: True if item was deleted
"""
index = self._hash(key) % self._count
shard = self._shards[index]
try:
return shard.delete(key, retry)
except Timeout:
return False
def __delitem__(self, key):
"""Delete corresponding item for `key` from cache.
Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
:param key: key for item
:raises KeyError: if key is not found
"""
index = self._hash(key) % self._count
shard = self._shards[index]
del shard[key]
def check(self, fix=False, retry=False):
"""Check database and file system consistency.
Intended for use in testing and post-mortem error analysis.
While checking the cache table for consistency, a writer lock is held
on the database. The lock blocks other cache clients from writing to
the database. For caches with many file references, the lock may be
held for a long time. For example, local benchmarking shows that a
cache with 1,000 file references takes ~60ms to check.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param bool fix: correct inconsistencies
:param bool retry: retry if database timeout occurs (default False)
:return: list of warnings
:raises Timeout: if database timeout occurs
"""
warnings = (shard.check(fix, retry) for shard in self._shards)
return reduce(operator.iadd, warnings, [])
def expire(self, retry=False):
"""Remove expired items from cache.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param bool retry: retry if database timeout occurs (default False)
:return: count of items removed
"""
return self._remove('expire', args=(time.time(),), retry=retry)
def create_tag_index(self):
"""Create tag index on cache database.
Better to initialize cache with `tag_index=True` than use this.
:raises Timeout: if database timeout occurs
"""
for shard in self._shards:
shard.create_tag_index()
def drop_tag_index(self):
"""Drop tag index on cache database.
:raises Timeout: if database timeout occurs
"""
for shard in self._shards:
shard.drop_tag_index()
def evict(self, tag, retry=False):
"""Remove items with matching `tag` from cache.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param str tag: tag identifying items
:param bool retry: retry if database timeout occurs (default False)
:return: count of items removed
"""
return self._remove('evict', args=(tag,), retry=retry)
def cull(self, retry=False):
"""Cull items from cache until volume is less than size limit.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param bool retry: retry if database timeout occurs (default False)
:return: count of items removed
"""
return self._remove('cull', retry=retry)
def clear(self, retry=False):
"""Remove all items from cache.
If database timeout occurs then fails silently unless `retry` is set to
`True` (default `False`).
:param bool retry: retry if database timeout occurs (default False)
:return: count of items removed
"""
return self._remove('clear', retry=retry)
def _remove(self, name, args=(), retry=False):
total = 0
for shard in self._shards:
method = getattr(shard, name)
while True:
try:
count = method(*args, retry=retry)
total += count
except Timeout as timeout:
total += timeout.args[0]
else:
break
return total
def stats(self, enable=True, reset=False):
"""Return cache statistics hits and misses.
:param bool enable: enable collecting statistics (default True)
:param bool reset: reset hits and misses to 0 (default False)
:return: (hits, misses)
"""
results = [shard.stats(enable, reset) for shard in self._shards]
total_hits = sum(hits for hits, _ in results)
total_misses = sum(misses for _, misses in results)
return total_hits, total_misses
def volume(self):
"""Return estimated total size of cache on disk.
:return: size in bytes
"""
return sum(shard.volume() for shard in self._shards)
def close(self):
"Close database connection."
for shard in self._shards:
shard.close()
self._caches.clear()
self._deques.clear()
self._indexes.clear()
def __enter__(self):
return self
def __exit__(self, *exception):
self.close()
def __getstate__(self):
return (self._directory, self._count, self.timeout, type(self.disk))
def __setstate__(self, state):
self.__init__(*state)
def __iter__(self):
"Iterate keys in cache including expired items."
iterators = (iter(shard) for shard in self._shards)
return it.chain.from_iterable(iterators)
def __reversed__(self):
"Reverse iterate keys in cache including expired items."
iterators = (reversed(shard) for shard in reversed(self._shards))
return it.chain.from_iterable(iterators)
def __len__(self):
"Count of items in cache including expired items."
return sum(len(shard) for shard in self._shards)
def reset(self, key, value=ENOVAL):
"""Reset `key` and `value` item from Settings table.
If `value` is not given, it is reloaded from the Settings
table. Otherwise, the Settings table is updated.
Settings attributes on cache objects are lazy-loaded and
read-only. Use `reset` to update the value.
Settings with the ``sqlite_`` prefix correspond to SQLite
pragmas. Updating the value will execute the corresponding PRAGMA
statement.
:param str key: Settings key for item
:param value: value for item (optional)
:return: updated value for item
"""
for shard in self._shards:
while True:
try:
result = shard.reset(key, value)
except Timeout:
pass
else:
break
return result
def cache(self, name):
"""Return Cache with given `name` in subdirectory.
>>> fanout_cache = FanoutCache()
>>> cache = fanout_cache.cache('test')
>>> cache.set('abc', 123)
True
>>> cache.get('abc')
123
>>> len(cache)
1
>>> cache.delete('abc')
True
:param str name: subdirectory name for Cache
:return: Cache with given name
"""
_caches = self._caches
try:
return _caches[name]
except KeyError:
parts = name.split('/')
directory = op.join(self._directory, 'cache', *parts)
temp = Cache(directory=directory)
_caches[name] = temp
return temp
def deque(self, name):
"""Return Deque with given `name` in subdirectory.
>>> cache = FanoutCache()
>>> deque = cache.deque('test')
>>> deque.extend('abc')
>>> deque.popleft()
'a'
>>> deque.pop()
'c'
>>> len(deque)
1
:param str name: subdirectory name for Deque
:return: Deque with given name
"""
_deques = self._deques
try:
return _deques[name]
except KeyError:
parts = name.split('/')
directory = op.join(self._directory, 'deque', *parts)
temp = Deque(directory=directory)
_deques[name] = temp
return temp
def index(self, name):
"""Return Index with given `name` in subdirectory.
>>> cache = FanoutCache()
>>> index = cache.index('test')
>>> index['abc'] = 123
>>> index['def'] = 456
>>> index['ghi'] = 789
>>> index.popitem()
('ghi', 789)
>>> del index['abc']
>>> len(index)
1
>>> index['def']
456
:param str name: subdirectory name for Index
:return: Index with given name
"""
_indexes = self._indexes
try:
return _indexes[name]
except KeyError:
parts = name.split('/')
directory = op.join(self._directory, 'index', *parts)
temp = Index(directory)
_indexes[name] = temp
return temp
############################################################################
# BEGIN Python 2/3 Shims
############################################################################
if sys.hexversion < 0x03000000:
import types
memoize_func = Cache.__dict__['memoize'] # pylint: disable=invalid-name
FanoutCache.memoize = types.MethodType(memoize_func, None, FanoutCache)
else:
FanoutCache.memoize = Cache.memoize
############################################################################
# END Python 2/3 Shims
############################################################################
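In short, FanoutCache hashes each key to one of the shard Cache databases so concurrent writers rarely contend on the same SQLite file, and database timeouts degrade to silent no-ops rather than raising. A minimal sketch (the directory is illustrative):

from diskcache import FanoutCache

cache = FanoutCache('/tmp/demo-fanout', shards=8, timeout=0.010)
cache.set('views:home', 1)
cache.incr('views:home')             # atomic increment within the key's shard
assert cache.get('views:home') == 2
cache.close()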

third_party/python/diskcache/diskcache/persistent.py (vendored, new file, 1,403 lines; diff not shown because of its size)

third_party/python/diskcache/diskcache/recipes.py (vendored, new file)
@@ -0,0 +1,437 @@
"""Disk Cache Recipes
"""
import functools
import math
import os
import random
import sys
import threading
import time
from .core import ENOVAL, args_to_key, full_name
############################################################################
# BEGIN Python 2/3 Shims
############################################################################
if sys.hexversion < 0x03000000:
from thread import get_ident # pylint: disable=import-error
else:
from threading import get_ident
############################################################################
# END Python 2/3 Shims
############################################################################
class Averager(object):
"""Recipe for calculating a running average.
Sometimes known as "online statistics," the running average maintains the
total and count. The average can then be calculated at any time.
>>> import diskcache
>>> cache = diskcache.FanoutCache()
>>> ave = Averager(cache, 'latency')
>>> ave.add(0.080)
>>> ave.add(0.120)
>>> ave.get()
0.1
>>> ave.add(0.160)
>>> ave.pop()
0.12
>>> print(ave.get())
None
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def add(self, value):
"Add `value` to average."
with self._cache.transact(retry=True):
total, count = self._cache.get(self._key, default=(0.0, 0))
total += value
count += 1
self._cache.set(
self._key, (total, count), expire=self._expire, tag=self._tag,
)
def get(self):
"Get current average or return `None` if count equals zero."
total, count = self._cache.get(self._key, default=(0.0, 0), retry=True)
return None if count == 0 else total / count
def pop(self):
"Return current average and delete key."
total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True)
return None if count == 0 else total / count
class Lock(object):
"""Recipe for cross-process and cross-thread lock.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> lock = Lock(cache, 'report-123')
>>> lock.acquire()
>>> lock.release()
>>> with lock:
... pass
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire lock using spin-lock algorithm."
while True:
added = self._cache.add(
self._key, None, expire=self._expire, tag=self._tag, retry=True,
)
if added:
break
time.sleep(0.001)
def release(self):
"Release lock by deleting key."
self._cache.delete(self._key, retry=True)
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
class RLock(object):
"""Recipe for cross-process and cross-thread re-entrant lock.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> rlock = RLock(cache, 'user-123')
>>> rlock.acquire()
>>> rlock.acquire()
>>> rlock.release()
>>> with rlock:
... pass
>>> rlock.release()
>>> rlock.release()
Traceback (most recent call last):
...
AssertionError: cannot release un-acquired lock
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire lock by incrementing count using spin-lock algorithm."
pid = os.getpid()
tid = get_ident()
pid_tid = '{}-{}'.format(pid, tid)
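# The cached value is an (owner, count) pair; the owning process/thread
# may re-acquire, which simply increments the count.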
while True:
with self._cache.transact(retry=True):
value, count = self._cache.get(self._key, default=(None, 0))
if pid_tid == value or count == 0:
self._cache.set(
self._key, (pid_tid, count + 1),
expire=self._expire, tag=self._tag,
)
return
time.sleep(0.001)
def release(self):
"Release lock by decrementing count."
pid = os.getpid()
tid = get_ident()
pid_tid = '{}-{}'.format(pid, tid)
with self._cache.transact(retry=True):
value, count = self._cache.get(self._key, default=(None, 0))
is_owned = pid_tid == value and count > 0
assert is_owned, 'cannot release un-acquired lock'
self._cache.set(
self._key, (value, count - 1),
expire=self._expire, tag=self._tag,
)
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
class BoundedSemaphore(object):
"""Recipe for cross-process and cross-thread bounded semaphore.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
>>> semaphore.acquire()
>>> semaphore.acquire()
>>> semaphore.release()
>>> with semaphore:
... pass
>>> semaphore.release()
>>> semaphore.release()
Traceback (most recent call last):
...
AssertionError: cannot release un-acquired semaphore
"""
def __init__(self, cache, key, value=1, expire=None, tag=None):
self._cache = cache
self._key = key
self._value = value
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire semaphore by decrementing value using spin-lock algorithm."
while True:
with self._cache.transact(retry=True):
value = self._cache.get(self._key, default=self._value)
if value > 0:
self._cache.set(
self._key, value - 1,
expire=self._expire, tag=self._tag,
)
return
time.sleep(0.001)
def release(self):
"Release semaphore by incrementing value."
with self._cache.transact(retry=True):
value = self._cache.get(self._key, default=self._value)
assert self._value > value, 'cannot release un-acquired semaphore'
value += 1
self._cache.set(
self._key, value, expire=self._expire, tag=self._tag,
)
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
def throttle(cache, count, seconds, name=None, expire=None, tag=None,
time_func=time.time, sleep_func=time.sleep):
"""Decorator to throttle calls to function.
>>> import diskcache, time
>>> cache = diskcache.Cache()
>>> count = 0
>>> @throttle(cache, 2, 1) # 2 calls per 1 second
... def increment():
... global count
... count += 1
>>> start = time.time()
>>> while (time.time() - start) <= 2:
... increment()
>>> count in (6, 7) # 6 or 7 calls depending on CPU load
True
"""
def decorator(func):
rate = count / float(seconds)
key = full_name(func) if name is None else name
now = time_func()
cache.set(key, (now, count), expire=expire, tag=tag, retry=True)
@functools.wraps(func)
def wrapper(*args, **kwargs):
while True:
with cache.transact(retry=True):
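# Token-bucket accounting: the cached pair is (time of last update,
# tokens remaining); tokens refill at `rate` per second and each
# successful call spends one.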
last, tally = cache.get(key)
now = time_func()
tally += (now - last) * rate
delay = 0
if tally > count:
cache.set(key, (now, count - 1), expire)
elif tally >= 1:
cache.set(key, (now, tally - 1), expire)
else:
delay = (1 - tally) / rate
if delay:
sleep_func(delay)
else:
break
return func(*args, **kwargs)
return wrapper
return decorator
def barrier(cache, lock_factory, name=None, expire=None, tag=None):
"""Barrier to calling decorated function.
Supports different kinds of locks: Lock, RLock, BoundedSemaphore.
>>> import diskcache, time
>>> cache = diskcache.Cache()
>>> @barrier(cache, Lock)
... def work(num):
... print('worker started')
... time.sleep(1)
... print('worker finished')
>>> import multiprocessing.pool
>>> pool = multiprocessing.pool.ThreadPool(2)
>>> _ = pool.map(work, range(2))
worker started
worker finished
worker started
worker finished
>>> pool.terminate()
"""
def decorator(func):
key = full_name(func) if name is None else name
lock = lock_factory(cache, key, expire=expire, tag=tag)
@functools.wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return decorator
def memoize_stampede(cache, expire, name=None, typed=False, tag=None, beta=1):
"""Memoizing cache decorator with cache stampede protection.
Cache stampedes are a type of system overload that can occur when parallel
computing systems using memoization come under heavy load. This behaviour
is sometimes also called dog-piling, cache miss storm, cache choking, or
the thundering herd problem.
The memoization decorator implements cache stampede protection through
early recomputation. Early recomputation of function results will occur
probabilistically before expiration in a background thread of
execution. Early probabilistic recomputation is based on research by
Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097
If name is set to None (default), the callable name will be determined
automatically.
If typed is set to True, function arguments of different types will be
cached separately. For example, f(3) and f(3.0) will be treated as distinct
calls with distinct results.
The original underlying function is accessible through the `__wrapped__`
attribute. This is useful for introspection, for bypassing the cache, or
for rewrapping the function with a different cache.
>>> from diskcache import Cache
>>> cache = Cache()
>>> @memoize_stampede(cache, expire=1)
... def fib(number):
... if number == 0:
... return 0
... elif number == 1:
... return 1
... else:
... return fib(number - 1) + fib(number - 2)
>>> print(fib(100))
354224848179261915075
An additional `__cache_key__` attribute can be used to generate the cache
key used for the given arguments.
>>> key = fib.__cache_key__(100)
>>> del cache[key]
Remember to call memoize when decorating a callable. If you forget, then a
TypeError will occur.
:param cache: cache to store callable arguments and return values
:param float expire: seconds until arguments expire
:param str name: name given for callable (default None, automatic)
:param bool typed: cache different types separately (default False)
:param str tag: text to associate with arguments (default None)
:return: callable decorator
"""
# Caution: Nearly identical code exists in Cache.memoize
def decorator(func):
"Decorator created by memoize call for callable."
base = (full_name(func),) if name is None else (name,)
def timer(*args, **kwargs):
"Time execution of `func` and return result and time delta."
start = time.time()
result = func(*args, **kwargs)
delta = time.time() - start
return result, delta
@functools.wraps(func)
def wrapper(*args, **kwargs):
"Wrapper for callable to cache arguments and return values."
key = wrapper.__cache_key__(*args, **kwargs)
pair, expire_time = cache.get(
key, default=ENOVAL, expire_time=True, retry=True,
)
if pair is not ENOVAL:
result, delta = pair
now = time.time()
ttl = expire_time - now
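# Probabilistic early recomputation, per the cited Vattani et al. paper:
# the chance of refreshing before expiry grows as the ttl shrinks and is
# scaled by how long the computation took (`delta`) and by `beta`.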
if (-delta * beta * math.log(random.random())) < ttl:
return result # Cache hit.
# Check whether a thread has started for early recomputation.
thread_key = key + (ENOVAL,)
thread_added = cache.add(
thread_key, None, expire=delta, retry=True,
)
if thread_added:
# Start thread for early recomputation.
def recompute():
with cache:
pair = timer(*args, **kwargs)
cache.set(
key, pair, expire=expire, tag=tag, retry=True,
)
thread = threading.Thread(target=recompute)
thread.daemon = True
thread.start()
return result
pair = timer(*args, **kwargs)
cache.set(key, pair, expire=expire, tag=tag, retry=True)
return pair[0]
def __cache_key__(*args, **kwargs):
"Make key for cache given function arguments."
return args_to_key(base, args, kwargs, typed)
wrapper.__cache_key__ = __cache_key__
return wrapper
return decorator

18
third_party/python/glean_parser/glean_parser/__init__.py vendored Normal file
View File

@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Top-level package for Glean parser."""
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
__author__ = """Michael Droettboom"""
__email__ = "mdroettboom@mozilla.com"

131
third_party/python/glean_parser/glean_parser/__main__.py vendored Normal file
View File

@@ -0,0 +1,131 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Console script for glean_parser."""
import io
from pathlib import Path
import sys
import click
from . import lint
from . import translate as mod_translate
from . import validate_ping
@click.command()
@click.argument(
"input",
type=click.Path(exists=False, dir_okay=False, file_okay=True, readable=True),
nargs=-1,
)
@click.option(
"--output",
"-o",
type=click.Path(dir_okay=True, file_okay=False, writable=True),
nargs=1,
required=True,
)
@click.option(
"--format", "-f", type=click.Choice(mod_translate.OUTPUTTERS.keys()), required=True
)
@click.option(
"--option",
"-s",
help="backend-specific option. Must be of the form key=value",
type=str,
multiple=True,
required=False,
)
@click.option(
"--allow-reserved",
is_flag=True,
help=(
"If provided, allow the use of reserved fields. "
"Should only be set when building the Glean library itself."
),
)
def translate(input, format, output, option, allow_reserved):
"""
Translate metrics.yaml and pings.yaml files to other formats.
"""
option_dict = {}
for opt in option:
key, val = opt.split("=", 1)
option_dict[key] = val
sys.exit(
mod_translate.translate(
[Path(x) for x in input],
format,
Path(output),
option_dict,
{"allow_reserved": allow_reserved},
)
)
@click.command()
@click.option(
"--schema",
"-s",
type=str,
nargs=1,
required=True,
help=("HTTP url or file path to Glean ping schema. If remote, will cache to disk."),
)
def check(schema):
"""
Validate the contents of a Glean ping.
The ping contents are read from stdin, and the validation errors are
written to stdout.
"""
sys.exit(
validate_ping.validate_ping(
io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8"),
io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8"),
schema_url=schema,
)
)
@click.command()
@click.argument(
"input",
type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
nargs=-1,
)
@click.option(
"--allow-reserved",
is_flag=True,
help=(
"If provided, allow the use of reserved fields. "
"Should only be set when building the Glean library itself."
),
)
def glinter(input, allow_reserved):
"""
Runs a linter over the metrics.
"""
sys.exit(lint.glinter([Path(x) for x in input], {"allow_reserved": allow_reserved}))
@click.group()
@click.version_option()
def main(args=None):
"""Command line utility for glean_parser."""
pass
main.add_command(translate)
main.add_command(check)
main.add_command(glinter)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover

262
third_party/python/glean_parser/glean_parser/kotlin.py vendored Normal file
View File

@@ -0,0 +1,262 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Outputter to generate Kotlin code for metrics.
"""
from collections import OrderedDict
import enum
import json
from . import util
def kotlin_datatypes_filter(value):
"""
A Jinja2 filter that renders Kotlin literals.
Based on Python's JSONEncoder, but overrides:
- lists to use listOf
- dicts to use mapOf
- sets to use setOf
- enums to use the like-named Kotlin enum
"""
class KotlinEncoder(json.JSONEncoder):
def iterencode(self, value):
if isinstance(value, list):
yield "listOf("
first = True
for subvalue in value:
if not first:
yield ", "
yield from self.iterencode(subvalue)
first = False
yield ")"
elif isinstance(value, dict):
yield "mapOf("
first = True
for key, subvalue in value.items():
if not first:
yield ", "
yield from self.iterencode(key)
yield " to "
yield from self.iterencode(subvalue)
first = False
yield ")"
elif isinstance(value, enum.Enum):
yield (value.__class__.__name__ + "." + util.Camelize(value.name))
elif isinstance(value, set):
yield "setOf("
first = True
for subvalue in sorted(list(value)):
if not first:
yield ", "
yield from self.iterencode(subvalue)
first = False
yield ")"
else:
yield from super().iterencode(value)
return "".join(KotlinEncoder().iterencode(value))
def type_name(obj):
"""
Returns the Kotlin type to use for a given metric or ping object.
"""
generate_enums = getattr(obj, "_generate_enums", [])
if len(generate_enums):
template_args = []
for member, suffix in generate_enums:
if len(getattr(obj, member)):
template_args.append(util.camelize(obj.name) + suffix)
else:
if suffix == "Keys":
template_args.append("NoExtraKeys")
else:
template_args.append("No" + suffix)
return "{}<{}>".format(class_name(obj.type), ", ".join(template_args))
return class_name(obj.type)
def class_name(obj_type):
"""
Returns the Kotlin class name for a given metric or ping type.
"""
if obj_type == "ping":
return "PingType"
if obj_type.startswith("labeled_"):
obj_type = obj_type[8:]
return util.Camelize(obj_type) + "MetricType"
def output_gecko_lookup(objs, output_dir, options={}):
"""
Given a tree of objects, generate a Kotlin map between Gecko histograms and
Glean SDK metric types.
:param objs: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
- `namespace`: The package namespace to declare at the top of the
generated files. Defaults to `GleanMetrics`.
- `glean_namespace`: The package namespace of the glean library itself.
This is where glean objects will be imported from in the generated
code.
"""
template = util.get_jinja2_template(
"kotlin.geckoview.jinja2",
filters=(
("kotlin", kotlin_datatypes_filter),
("type_name", type_name),
("class_name", class_name),
),
)
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
# Build a dictionary that contains data for metrics that are
# histogram-like/scalar-like and contain a gecko_datapoint, with this format:
#
# {
# "histograms": {
# "category": [
# {"gecko_datapoint": "the-datapoint", "name": "the-metric-name"},
# ...
# ],
# ...
# },
# "other-type": {}
# }
gecko_metrics = OrderedDict()
# Define scalar-like types.
SCALAR_LIKE_TYPES = ["boolean", "string", "quantity"]
for category_key, category_val in objs.items():
# Support exfiltration of Gecko metrics from products using both the
# Glean SDK and GeckoView. See bug 1566356 for more context.
for metric in category_val.values():
# This is not a Gecko metric, skip it.
if not getattr(metric, "gecko_datapoint", False):
continue
# Put scalars in their own categories, histogram-like in "histograms" and
# categorical histograms in "categoricals".
type_category = "histograms"
if metric.type in SCALAR_LIKE_TYPES:
type_category = metric.type
elif metric.type == "labeled_counter":
# Labeled counters with a 'gecko_datapoint' property
# are categorical histograms.
type_category = "categoricals"
gecko_metrics.setdefault(type_category, OrderedDict())
gecko_metrics[type_category].setdefault(category_key, [])
gecko_metrics[type_category][category_key].append(
{"gecko_datapoint": metric.gecko_datapoint, "name": metric.name}
)
if not gecko_metrics:
# Bail out and don't create a file if no gecko metrics
# are found.
return
filepath = output_dir / "GleanGeckoMetricsMapping.kt"
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
gecko_metrics=gecko_metrics,
namespace=namespace,
glean_namespace=glean_namespace,
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")
def output_kotlin(objs, output_dir, options={}):
"""
Given a tree of objects, output Kotlin code to `output_dir`.
:param objs: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
- `namespace`: The package namespace to declare at the top of the
generated files. Defaults to `GleanMetrics`.
- `glean_namespace`: The package namespace of the glean library itself.
This is where glean objects will be imported from in the generated
code.
"""
template = util.get_jinja2_template(
"kotlin.jinja2",
filters=(
("kotlin", kotlin_datatypes_filter),
("type_name", type_name),
("class_name", class_name),
),
)
# The object parameters to pass to constructors
extra_args = [
"allowed_extra_keys",
"bucket_count",
"category",
"disabled",
"histogram_type",
"include_client_id",
"send_if_empty",
"lifetime",
"memory_unit",
"name",
"range_max",
"range_min",
"reason_codes",
"send_in_pings",
"time_unit",
]
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
for category_key, category_val in objs.items():
filename = util.Camelize(category_key) + ".kt"
filepath = output_dir / filename
obj_types = sorted(
list(set(class_name(obj.type) for obj in category_val.values()))
)
has_labeled_metrics = any(
getattr(metric, "labeled", False) for metric in category_val.values()
)
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
category_name=category_key,
objs=category_val,
obj_types=obj_types,
extra_args=extra_args,
namespace=namespace,
has_labeled_metrics=has_labeled_metrics,
glean_namespace=glean_namespace,
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")
# TODO: Maybe this should just be a separate outputter?
output_gecko_lookup(objs, output_dir, options)

302
third_party/python/glean_parser/glean_parser/lint.py vendored Normal file
View File

@@ -0,0 +1,302 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import sys
from . import parser
from . import util
from yamllint.config import YamlLintConfig
from yamllint import linter
def _split_words(name):
"""
Helper function to split words on either `.` or `_`.
"""
return re.split("[._]", name)
def _hamming_distance(str1, str2):
"""
Count the # of differences between strings str1 and str2,
padding the shorter one with whitespace
"""
diffs = 0
if len(str1) < len(str2):
str1, str2 = str2, str1
len_dist = len(str1) - len(str2)
str2 += " " * len_dist
for ch1, ch2 in zip(str1, str2):
if ch1 != ch2:
diffs += 1
return diffs
def check_common_prefix(category_name, metrics):
"""
Check if all metrics begin with a common prefix.
"""
metric_words = sorted([_split_words(metric.name) for metric in metrics])
if len(metric_words) < 2:
return
first = metric_words[0]
last = metric_words[-1]
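# Since the word-split names are sorted, any prefix shared by the first
# and last entries is shared by every entry in between.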
for i in range(min(len(first), len(last))):
if first[i] != last[i]:
break
if i > 0:
common_prefix = "_".join(first[:i])
yield (
"Within category '{}', all metrics begin with prefix "
"'{}'. Remove prefixes and (possibly) rename category."
).format(category_name, common_prefix)
def check_unit_in_name(metric, parser_config={}):
"""
The metric name ends in a unit.
"""
TIME_UNIT_ABBREV = {
"nanosecond": "ns",
"microsecond": "us",
"millisecond": "ms",
"second": "s",
"minute": "m",
"hour": "h",
"day": "d",
}
MEMORY_UNIT_ABBREV = {
"byte": "b",
"kilobyte": "kb",
"megabyte": "mb",
"gigabyte": "gb",
}
name_words = _split_words(metric.name)
unit_in_name = name_words[-1]
if hasattr(metric, "time_unit"):
if (
unit_in_name == TIME_UNIT_ABBREV.get(metric.time_unit.name)
or unit_in_name == metric.time_unit.name
):
yield (
"Suffix '{}' is redundant with time_unit. " "Only include time_unit."
).format(unit_in_name)
elif (
unit_in_name in TIME_UNIT_ABBREV.keys()
or unit_in_name in TIME_UNIT_ABBREV.values()
):
yield (
"Suffix '{}' doesn't match time_unit. "
"Confirm the unit is correct and only include time_unit."
).format(unit_in_name)
elif hasattr(metric, "memory_unit"):
if (
unit_in_name == MEMORY_UNIT_ABBREV.get(metric.memory_unit.name)
or unit_in_name == metric.memory_unit.name
):
yield (
"Suffix '{}' is redundant with memory_unit. "
"Only include memory_unit."
).format(unit_in_name)
elif (
unit_in_name in MEMORY_UNIT_ABBREV.keys()
or unit_in_name in MEMORY_UNIT_ABBREV.values()
):
yield (
"Suffix '{}' doesn't match memory_unit. "
"Confirm the unit is correct and only include memory_unit."
).format(unit_in_name)
elif hasattr(metric, "unit"):
if unit_in_name == metric.unit:
yield (
"Suffix '{}' is redundant with unit param. " "Only include unit."
).format(unit_in_name)
def check_category_generic(category_name, metrics):
"""
The category name is too generic.
"""
GENERIC_CATEGORIES = ["metrics", "events"]
if category_name in GENERIC_CATEGORIES:
yield "Category '{}' is too generic.".format(category_name)
def check_bug_number(metric, parser_config={}):
number_bugs = [str(bug) for bug in metric.bugs if isinstance(bug, int)]
if len(number_bugs):
yield (
"For bugs {}: "
"Bug numbers are deprecated and should be changed to full URLs."
).format(", ".join(number_bugs))
def check_valid_in_baseline(metric, parser_config={}):
allow_reserved = parser_config.get("allow_reserved", False)
if not allow_reserved and "baseline" in metric.send_in_pings:
yield (
"The baseline ping is Glean-internal. "
"User metrics should go into the 'metrics' ping or custom pings."
)
def check_misspelled_pings(metric, parser_config={}):
builtin_pings = ["metrics", "events"]
for ping in metric.send_in_pings:
for builtin in builtin_pings:
distance = _hamming_distance(ping, builtin)
if distance == 1:
yield ("Ping '{}' seems misspelled. Did you mean '{}'?").format(
ping, builtin
)
CATEGORY_CHECKS = {
"COMMON_PREFIX": check_common_prefix,
"CATEGORY_GENERIC": check_category_generic,
}
INDIVIDUAL_CHECKS = {
"UNIT_IN_NAME": check_unit_in_name,
"BUG_NUMBER": check_bug_number,
"BASELINE_PING": check_valid_in_baseline,
"MISSPELLED_PING": check_misspelled_pings,
}
def lint_metrics(objs, parser_config={}, file=sys.stderr):
"""
Performs glinter checks on a set of metrics objects.
:param objs: Tree of metric objects, as returned by `parser.parse_objects`.
:param file: The stream to write errors to.
:returns: List of nits.
"""
nits = []
for (category_name, metrics) in sorted(list(objs.items())):
if category_name == "pings":
continue
for (check_name, check_func) in CATEGORY_CHECKS.items():
if any(check_name in metric.no_lint for metric in metrics.values()):
continue
nits.extend(
(check_name, category_name, msg)
for msg in check_func(category_name, metrics.values())
)
for (metric_name, metric) in sorted(list(metrics.items())):
for (check_name, check_func) in INDIVIDUAL_CHECKS.items():
new_nits = list(check_func(metric, parser_config))
if len(new_nits):
if check_name not in metric.no_lint:
nits.extend(
(check_name, ".".join([metric.category, metric.name]), msg)
for msg in new_nits
)
else:
if (
check_name not in CATEGORY_CHECKS
and check_name in metric.no_lint
):
nits.append(
(
"SUPERFLUOUS_NO_LINT",
".".join([metric.category, metric.name]),
(
"Superfluous no_lint entry '{}'. "
"Please remove it."
).format(check_name),
)
)
if len(nits):
print("Sorry, Glean found some glinter nits:", file=file)
for check_name, name, msg in nits:
print("{}: {}: {}".format(check_name, name, msg), file=file)
print("", file=file)
print("Please fix the above nits to continue.", file=file)
print(
"To disable a check, add a `no_lint` parameter "
"with a list of check names to disable.\n"
"This parameter can appear with each individual metric, or at the "
"top-level to affect the entire file.",
file=file,
)
return nits
def lint_yaml_files(input_filepaths, file=sys.stderr):
"""
Performs glinter YAML lint on a set of files.
:param input_filepaths: List of input files to lint.
:param file: The stream to write errors to.
:returns: List of nits.
"""
nits = []
for path in input_filepaths:
# yamllint needs both the file content and the path.
file_content = None
with path.open("r") as fd:
file_content = fd.read()
problems = linter.run(file_content, YamlLintConfig("extends: default"), path)
nits.extend((path, p) for p in problems)
if len(nits):
print("Sorry, Glean found some glinter nits:", file=file)
for path, p in nits:
print("{} ({}:{}) - {}".format(path, p.line, p.column, p.message), file=file)
print("", file=file)
print("Please fix the above nits to continue.", file=file)
return nits
def glinter(input_filepaths, parser_config={}, file=sys.stderr):
"""
Commandline helper for glinter.
:param input_filepaths: List of Path objects to load metrics from.
:param parser_config: Parser configuration objects, passed to
`parser.parse_objects`.
:param file: The stream to write the errors to.
:return: Non-zero if there were any glinter errors.
"""
if lint_yaml_files(input_filepaths, file=file):
return 1
objs = parser.parse_objects(input_filepaths, parser_config)
if util.report_validation_errors(objs):
return 1
if lint_metrics(objs.value, parser_config=parser_config, file=file):
return 1
print("✨ Your metrics are Glean! ✨", file=file)
return 0

181
third_party/python/glean_parser/glean_parser/markdown.py vendored Normal file
View File

@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Outputter to generate Markdown documentation for metrics.
"""
from . import metrics
from . import pings
from . import util
from collections import defaultdict
def extra_info(obj):
"""
Returns a list of string to string tuples with extra information for the type
(e.g. extra keys for events) or an empty list if nothing is available.
"""
extra_info = []
if isinstance(obj, metrics.Event):
for key in obj.allowed_extra_keys:
extra_info.append((key, obj.extra_keys[key]["description"]))
if isinstance(obj, metrics.Labeled) and obj.ordered_labels is not None:
for label in obj.ordered_labels:
extra_info.append((label, None))
return extra_info
def ping_desc(ping_name, custom_pings_cache={}):
"""
Return a text description of the ping. If a custom_pings_cache
is available, look in there for non-reserved ping names description.
"""
desc = ""
if ping_name in pings.RESERVED_PING_NAMES:
desc = (
"This is a built-in ping that is assembled out of the "
"box by the Glean SDK."
)
elif ping_name == "all-pings":
desc = "These metrics are sent in every ping."
elif ping_name in custom_pings_cache:
desc = custom_pings_cache[ping_name].description
return desc
def metrics_docs(obj_name):
"""
Return a link to the documentation entry for the Glean SDK metric of the
requested type.
"""
base_url = "https://mozilla.github.io/glean/book/user/metrics/{}.html"
# We need to fixup labeled stuff, as types are singular and docs refer
# to them as plural.
fixedup_name = obj_name
if obj_name.startswith("labeled_"):
fixedup_name += "s"
return base_url.format(fixedup_name)
def ping_docs(ping_name):
"""
Return a link to the documentation entry for the requested Glean SDK
built-in ping.
"""
if ping_name not in pings.RESERVED_PING_NAMES:
return ""
return "https://mozilla.github.io/glean/book/user/pings/{}.html".format(ping_name)
def if_empty(ping_name, custom_pings_cache={}):
return (
custom_pings_cache.get(ping_name)
and custom_pings_cache[ping_name].send_if_empty
)
def ping_reasons(ping_name, custom_pings_cache):
"""
Returns the reasons dictionary for the ping.
"""
if ping_name == "all-pings":
return {}
elif ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].reasons
return {}
def output_markdown(objs, output_dir, options={}):
"""
Given a tree of objects, output Markdown docs to `output_dir`.
This produces a single `metrics.md`. The file contains a table of
contents and a section for each ping that metrics are collected for.
:param objs: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional key:
- `project_title`: The project's title.
"""
# Build a dictionary that associates pings with their metrics.
#
# {
# "baseline": [
# { ... metric data ... },
# ...
# ],
# "metrics": [
# { ... metric data ... },
# ...
# ],
# ...
# }
#
# This also builds a dictionary of custom pings, if available.
custom_pings_cache = defaultdict()
metrics_by_pings = defaultdict(list)
for category_key, category_val in objs.items():
for obj in category_val.values():
# Filter out custom pings. We will need them for extracting
# the description
if isinstance(obj, pings.Ping):
custom_pings_cache[obj.name] = obj
if obj.send_if_empty:
metrics_by_pings[obj.name] = []
elif obj.is_internal_metric():
# This is an internal Glean metric, and we don't
# want docs for it.
continue
else:
# If we get here, obj is definitely a metric we want
# docs for.
for ping_name in obj.send_in_pings:
metrics_by_pings[ping_name].append(obj)
# Sort the metrics by their identifier, to make them show up nicely
# in the docs and to make generated docs reproducible.
for ping_name in metrics_by_pings:
metrics_by_pings[ping_name] = sorted(
metrics_by_pings[ping_name], key=lambda x: x.identifier()
)
project_title = options.get("project_title", "this project")
template = util.get_jinja2_template(
"markdown.jinja2",
filters=(
("extra_info", extra_info),
("metrics_docs", metrics_docs),
("ping_desc", lambda x: ping_desc(x, custom_pings_cache)),
("ping_send_if_empty", lambda x: if_empty(x, custom_pings_cache)),
("ping_docs", ping_docs),
("ping_reasons", lambda x: ping_reasons(x, custom_pings_cache)),
),
)
filename = "metrics.md"
filepath = output_dir / filename
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
metrics_by_pings=metrics_by_pings, project_title=project_title
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")

311
third_party/python/glean_parser/glean_parser/metrics.py vendored Normal file
View File

@@ -0,0 +1,311 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Classes for each of the high-level metric types.
"""
import enum
import sys
from . import util
# Import a backport of PEP487 to support __init_subclass__
if sys.version_info < (3, 6):
import pep487
base_object = pep487.PEP487Object
else:
base_object = object
class Lifetime(enum.Enum):
ping = 0
user = 1
application = 2
class Metric(base_object):
glean_internal_metric_cat = "glean.internal.metrics"
metric_types = {}
default_store_names = ["metrics"]
def __init__(
self,
type,
category,
name,
bugs,
description,
notification_emails,
expires,
data_reviews=None,
version=0,
disabled=False,
lifetime="ping",
send_in_pings=None,
unit="",
gecko_datapoint="",
no_lint=None,
_config=None,
_validated=False,
):
# Avoid cyclical import
from . import parser
self.type = type
self.category = category
self.name = name
self.bugs = bugs
self.description = description
self.notification_emails = notification_emails
self.expires = expires
if data_reviews is None:
data_reviews = []
self.data_reviews = data_reviews
self.version = version
self.disabled = disabled
self.lifetime = getattr(Lifetime, lifetime)
if send_in_pings is None:
send_in_pings = ["default"]
self.send_in_pings = send_in_pings
self.unit = unit
self.gecko_datapoint = gecko_datapoint
if no_lint is None:
no_lint = []
self.no_lint = no_lint
# _validated indicates whether this metric has already been jsonschema
# validated (but not any of the Python-level validation).
if not _validated:
data = {
"$schema": parser.METRICS_ID,
self.category: {self.name: self.serialize()},
}
for error in parser.validate(data):
raise ValueError(error)
# Metrics in the special category "glean.internal.metrics" need to have
# an empty category string when identifying the metrics in the ping.
if self.category == Metric.glean_internal_metric_cat:
self.category = ""
def __init_subclass__(cls, **kwargs):
# Create a mapping of all of the subclasses of this class
if cls not in Metric.metric_types and hasattr(cls, "typename"):
Metric.metric_types[cls.typename] = cls
super().__init_subclass__(**kwargs)
@classmethod
def make_metric(cls, category, name, metric_info, config={}, validated=False):
"""
Given a metric_info dictionary from metrics.yaml, return a metric
instance.
:param: category The category the metric lives in
:param: name The name of the metric
:param: metric_info A dictionary of the remaining metric parameters
:param: config A dictionary containing commandline configuration
parameters
:param: validated True if the metric has already gone through
jsonschema validation
:return: A new Metric instance.
"""
metric_type = metric_info["type"]
return cls.metric_types[metric_type](
category=category,
name=name,
_validated=validated,
_config=config,
**metric_info
)
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""
d = self.__dict__.copy()
# Convert enum fields back to strings
for key, val in d.items():
if isinstance(val, enum.Enum):
d[key] = d[key].name
if isinstance(val, set):
d[key] = sorted(list(val))
del d["name"]
del d["category"]
return d
def identifier(self):
"""
Create an identifier unique for this metric.
Generally, category.name; however, Glean internal
metrics only use name.
"""
if not self.category:
return self.name
return ".".join((self.category, self.name))
def is_disabled(self):
return self.disabled or self.is_expired()
def is_expired(self):
return util.is_expired(self.expires)
@staticmethod
def validate_expires(expires):
return util.validate_expires(expires)
def is_internal_metric(self):
return self.category in (Metric.glean_internal_metric_cat, "")
class Boolean(Metric):
typename = "boolean"
class String(Metric):
typename = "string"
class StringList(Metric):
typename = "string_list"
class Counter(Metric):
typename = "counter"
class Quantity(Metric):
typename = "quantity"
class TimeUnit(enum.Enum):
nanosecond = 0
microsecond = 1
millisecond = 2
second = 3
minute = 4
hour = 5
day = 6
class TimeBase(Metric):
def __init__(self, *args, **kwargs):
self.time_unit = getattr(TimeUnit, kwargs.pop("time_unit", "millisecond"))
super().__init__(*args, **kwargs)
class Timespan(TimeBase):
typename = "timespan"
class TimingDistribution(TimeBase):
typename = "timing_distribution"
class MemoryUnit(enum.Enum):
byte = 0
kilobyte = 1
megabyte = 2
gigabyte = 3
class MemoryDistribution(Metric):
typename = "memory_distribution"
def __init__(self, *args, **kwargs):
self.memory_unit = getattr(MemoryUnit, kwargs.pop("memory_unit", "byte"))
super().__init__(*args, **kwargs)
class HistogramType(enum.Enum):
linear = 0
exponential = 1
class CustomDistribution(Metric):
typename = "custom_distribution"
def __init__(self, *args, **kwargs):
self.range_min = kwargs.pop("range_min", 1)
self.range_max = kwargs.pop("range_max")
self.bucket_count = kwargs.pop("bucket_count")
self.histogram_type = getattr(
HistogramType, kwargs.pop("histogram_type", "exponential")
)
super().__init__(*args, **kwargs)
class Datetime(TimeBase):
typename = "datetime"
class Event(Metric):
typename = "event"
default_store_names = ["events"]
_generate_enums = [("extra_keys", "Keys")]
def __init__(self, *args, **kwargs):
self.extra_keys = kwargs.pop("extra_keys", {})
self.validate_extra_keys(self.extra_keys, kwargs.get("_config", {}))
super().__init__(*args, **kwargs)
@property
def allowed_extra_keys(self):
# Sort keys so that output is deterministic
return sorted(list(self.extra_keys.keys()))
@staticmethod
def validate_extra_keys(extra_keys, config):
if not config.get("allow_reserved") and any(
k.startswith("glean.") for k in extra_keys.keys()
):
raise ValueError(
"Extra keys beginning with 'glean.' are reserved for "
"Glean internal use."
)
class Uuid(Metric):
typename = "uuid"
class Labeled(Metric):
labeled = True
def __init__(self, *args, **kwargs):
labels = kwargs.pop("labels", None)
if labels is not None:
self.ordered_labels = labels
self.labels = set(labels)
else:
self.ordered_labels = None
self.labels = None
super().__init__(*args, **kwargs)
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""
d = super().serialize()
d["labels"] = self.ordered_labels
del d["ordered_labels"]
return d
class LabeledBoolean(Labeled, Boolean):
typename = "labeled_boolean"
class LabeledString(Labeled, String):
typename = "labeled_string"
class LabeledCounter(Labeled, Counter):
typename = "labeled_counter"

321
third_party/python/glean_parser/glean_parser/parser.py vendored Normal file
View File

@@ -0,0 +1,321 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for parsing metrics.yaml files.
"""
from collections import OrderedDict
import functools
from pathlib import Path
import textwrap
import jsonschema
from jsonschema.exceptions import ValidationError
from .metrics import Metric
from .pings import Ping, RESERVED_PING_NAMES
from . import util
ROOT_DIR = Path(__file__).parent
SCHEMAS_DIR = ROOT_DIR / "schemas"
METRICS_ID = "moz://mozilla.org/schemas/glean/metrics/1-0-0"
PINGS_ID = "moz://mozilla.org/schemas/glean/pings/1-0-0"
FILE_TYPES = {METRICS_ID: "metrics", PINGS_ID: "pings"}
def _update_validator(validator):
"""
Adds some custom validators to the jsonschema validator that produce
nicer error messages.
"""
def required(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
missing_properties = set(
property for property in required if property not in instance
)
if len(missing_properties):
missing_properties = sorted(list(missing_properties))
yield ValidationError(
"Missing required properties: {}".format(", ".join(missing_properties))
)
validator.VALIDATORS["required"] = required
def _load_file(filepath):
"""
Load a metrics.yaml or pings.yaml format file.
"""
try:
content = util.load_yaml_or_json(filepath, ordered_dict=True)
except Exception as e:
yield util.format_error(filepath, "", textwrap.fill(str(e)))
return {}, None
if content is None:
yield util.format_error(
filepath, "", "'{}' file can not be empty.".format(filepath)
)
return {}, None
if content == {}:
return {}, None
filetype = FILE_TYPES.get(content.get("$schema"))
for error in validate(content, filepath):
content = {}
yield error
return content, filetype
@functools.lru_cache(maxsize=1)
def _load_schemas():
"""
Load all of the known schemas from disk, and put them in a map based on the
schema's $id.
"""
schemas = {}
for schema_path in SCHEMAS_DIR.glob("*.yaml"):
schema = util.load_yaml_or_json(schema_path)
resolver = util.get_null_resolver(schema)
validator_class = jsonschema.validators.validator_for(schema)
_update_validator(validator_class)
validator_class.check_schema(schema)
validator = validator_class(schema, resolver=resolver)
schemas[schema["$id"]] = (schema, validator)
return schemas
def _get_schema(schema_id, filepath="<input>"):
"""
Get the schema for the given schema $id.
"""
schemas = _load_schemas()
if schema_id not in schemas:
raise ValueError(
util.format_error(
filepath,
"",
"$schema key must be one of {}".format(", ".join(schemas.keys())),
)
)
return schemas[schema_id]
def _get_schema_for_content(content, filepath):
"""
Get the appropriate schema for the given JSON content.
"""
return _get_schema(content.get("$schema"), filepath)
def get_parameter_doc(key):
"""
Returns documentation about a specific metric parameter.
"""
schema, _ = _get_schema(METRICS_ID)
return schema["definitions"]["metric"]["properties"][key]["description"]
def get_ping_parameter_doc(key):
"""
Returns documentation about a specific ping parameter.
"""
schema, _ = _get_schema(PINGS_ID)
return schema["additionalProperties"]["properties"][key]["description"]
def validate(content, filepath="<input>"):
"""
Validate the given content against the appropriate schema.
"""
try:
schema, validator = _get_schema_for_content(content, filepath)
except ValueError as e:
yield str(e)
else:
yield from (
util.format_error(filepath, "", util.pprint_validation_error(e))
for e in validator.iter_errors(content)
)
def _instantiate_metrics(all_objects, sources, content, filepath, config):
"""
Load a list of metrics.yaml files, convert the JSON information into Metric
objects, and merge them into a single tree.
"""
global_no_lint = content.get("no_lint", [])
for category_key, category_val in content.items():
if category_key.startswith("$"):
continue
if category_key == "no_lint":
continue
if not config.get("allow_reserved") and category_key.split(".")[0] == "glean":
yield util.format_error(
filepath,
"For category '{}'".format(category_key),
"Categories beginning with 'glean' are reserved for "
"Glean internal use.",
)
continue
all_objects.setdefault(category_key, OrderedDict())
for metric_key, metric_val in category_val.items():
try:
metric_obj = Metric.make_metric(
category_key, metric_key, metric_val, validated=True, config=config
)
except Exception as e:
yield util.format_error(
filepath,
"On instance {}.{}".format(category_key, metric_key),
str(e),
)
metric_obj = None
else:
if (
not config.get("allow_reserved")
and "all-pings" in metric_obj.send_in_pings
):
yield util.format_error(
filepath,
"On instance {}.{}".format(category_key, metric_key),
'Only internal metrics may specify "all-pings" '
'in "send_in_pings"',
)
metric_obj = None
if metric_obj is not None:
metric_obj.no_lint = list(set(metric_obj.no_lint + global_no_lint))
already_seen = sources.get((category_key, metric_key))
if already_seen is not None:
# We've seen this metric name already
yield util.format_error(
filepath,
"",
("Duplicate metric name '{}.{}'" "already defined in '{}'").format(
category_key, metric_key, already_seen
),
)
else:
all_objects[category_key][metric_key] = metric_obj
sources[(category_key, metric_key)] = filepath
def _instantiate_pings(all_objects, sources, content, filepath, config):
"""
Load a list of pings.yaml files, convert the JSON information into Ping
objects.
"""
for ping_key, ping_val in content.items():
if ping_key.startswith("$"):
continue
if not config.get("allow_reserved"):
if ping_key in RESERVED_PING_NAMES:
yield util.format_error(
filepath,
"For ping '{}'".format(ping_key),
"Ping uses a reserved name ({})".format(RESERVED_PING_NAMES),
)
continue
ping_val["name"] = ping_key
try:
ping_obj = Ping(**ping_val)
except Exception as e:
yield util.format_error(
filepath, "On instance '{}'".format(ping_key), str(e)
)
ping_obj = None
already_seen = sources.get(ping_key)
if already_seen is not None:
# We've seen this ping name already
yield util.format_error(
filepath,
"",
("Duplicate ping name '{}'" "already defined in '{}'").format(
ping_key, already_seen
),
)
else:
all_objects.setdefault("pings", {})[ping_key] = ping_obj
sources[ping_key] = filepath
def _preprocess_objects(objs, config):
"""
Preprocess the object tree to better set defaults.
"""
for category in objs.values():
for obj in category.values():
if not config.get("do_not_disable_expired", False) and hasattr(
obj, "is_disabled"
):
obj.disabled = obj.is_disabled()
if hasattr(obj, "send_in_pings"):
if "default" in obj.send_in_pings:
obj.send_in_pings = obj.default_store_names + [
x for x in obj.send_in_pings if x != "default"
]
obj.send_in_pings = sorted(list(set(obj.send_in_pings)))
return objs
@util.keep_value
def parse_objects(filepaths, config={}):
"""
Parse one or more metrics.yaml and/or pings.yaml files, returning a tree of
`metrics.Metric` and `pings.Ping` instances.
The result is a generator over any errors. If there are no errors, the
actual metrics can be obtained from `result.value`. For example::
result = parser.parse_objects(filepaths)
for err in result:
print(err)
all_metrics = result.value
The result value is a dictionary of category names to categories, where
each category is a dictionary from metric name to `metrics.Metric`
instances. There is also the special category `pings` containing all
of the `pings.Ping` instances.
:param filepaths: list of Path objects to metrics.yaml and/or pings.yaml
files
:param config: A dictionary of options that change parsing behavior.
Supported keys are:
- `allow_reserved`: Allow values reserved for internal Glean use.
- `do_not_disable_expired`: Don't mark expired metrics as disabled.
This is useful when you want to retain the original "disabled"
value from the `metrics.yaml`, rather than having it overridden when
the metric expires.
"""
all_objects = OrderedDict()
sources = {}
filepaths = util.ensure_list(filepaths)
for filepath in filepaths:
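# _load_file is a generator: it yields error strings, while its return
# value, the (content, filetype) pair, is captured here via `yield from`.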
content, filetype = yield from _load_file(filepath)
if filetype == "metrics":
yield from _instantiate_metrics(
all_objects, sources, content, filepath, config
)
elif filetype == "pings":
yield from _instantiate_pings(
all_objects, sources, content, filepath, config
)
return _preprocess_objects(all_objects, config)

78
third_party/python/glean_parser/glean_parser/pings.py vendored Normal file
View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Classes for managing the description of pings.
"""
import sys
# Import a backport of PEP487 to support __init_subclass__
if sys.version_info < (3, 6):
import pep487
base_object = pep487.PEP487Object
else:
base_object = object
RESERVED_PING_NAMES = ["baseline", "metrics", "events", "deletion_request"]
class Ping(base_object):
def __init__(
self,
name,
description,
bugs,
notification_emails,
data_reviews=None,
include_client_id=False,
send_if_empty=False,
reasons=None,
_validated=False,
):
# Avoid cyclical import
from . import parser
self.name = name
self.description = description
self.bugs = bugs
self.notification_emails = notification_emails
if data_reviews is None:
data_reviews = []
self.data_reviews = data_reviews
self.include_client_id = include_client_id
self.send_if_empty = send_if_empty
if reasons is None:
reasons = {}
self.reasons = reasons
# _validated indicates whether this metric has already been jsonschema
# validated (but not any of the Python-level validation).
if not _validated:
data = {"$schema": parser.PINGS_ID, self.name: self.serialize()}
for error in parser.validate(data):
raise ValueError(error)
_generate_enums = [("reason_codes", "ReasonCodes")]
@property
def type(self):
return "ping"
@property
def reason_codes(self):
return sorted(list(self.reasons.keys()))
def serialize(self):
"""
Serialize the metric back to JSON object model.
"""
d = self.__dict__.copy()
del d["name"]
return d

View File

@@ -0,0 +1,520 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
---
$schema: http://json-schema.org/draft-07/schema#
title: Metrics
description: |
Schema for the metrics.yaml files for Mozilla's Glean telemetry SDK.
The top-level of the `metrics.yaml` file has a key defining each category of
metrics. Categories must be snake_case, and they may also have dots `.` to
define subcategories.
$id: moz://mozilla.org/schemas/glean/metrics/1-0-0
definitions:
token:
type: string
pattern: "^[A-Za-z_][A-Za-z0-9_\\.]*$"
snake_case:
type: string
pattern: "^[a-z_][a-z0-9_]*$"
dotted_snake_case:
type: string
pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
maxLength: 40
kebab_case:
type: string
# Bug 1601270; we allow 3 specific existing snake_cased ping names for now,
# but these special cases can be removed once the number of legacy clients
# sufficiently dwindles, likely in 2020H2.
pattern: "^[a-z][a-z0-9-]{0,29}$\
|^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
long_id:
allOf:
- $ref: "#/definitions/snake_case"
- maxLength: 40
short_id:
allOf:
- $ref: "#/definitions/snake_case"
- maxLength: 30
labeled_metric_id:
type: string
pattern: "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
maxLength: 71 # Note: this should be category + metric + 1
metric:
description: |
Describes a single metric.
See https://mozilla.github.io/glean_parser/metrics-yaml.html
type: object
additionalProperties: false
properties:
type:
title: Metric type
description: |
**Required.**
Specifies the type of a metric, like "counter" or "event". This
defines which operations are valid for the metric, how it is stored
and how data analysis tooling displays it.
The supported types are:
- `event`: Record a specific event (with optional metadata).
Additional properties: `extra_keys`.
- `boolean`: A metric storing values of true or false.
- `string`: A metric storing Unicode string values.
- `string_list`: a list of Unicode strings.
- `counter`: A numeric value that can only be incremented.
- `quantity`: A numeric value that is set directly. Only allowed for
metrics coming from GeckoView.
- `timespan`: Represents a time interval. Additional properties:
`time_unit`_.
- `timing_distribution`: Record the distribution of multiple
timings. Additional properties: `time_unit`_.
- `datetime`: A date/time value. Represented as an ISO datetime in
UTC. Additional properties: `time_unit`_.
- `uuid`: Record a UUID v4.
- `memory_distribution`: A histogram for recording memory usage
values. Additional properties: `memory_unit`_.
- `custom_distribution`: A histogram with a custom range and number
of buckets. This metric type is for legacy support only and is
only allowed for metrics coming from GeckoView. Additional
properties: `range_min`_, `range_max`_, `bucket_count`_,
`histogram_type`_.
- Additionally, labeled versions of many metric types are supported.
These support the `labels`_ parameter, allowing multiple instances
of the metric to be stored at a given set of labels. The labeled
metric types include:
`labeled_boolean`, `labeled_string`, `labeled_counter`.
type: string
enum:
- event
- boolean
- string
- string_list
- counter
- quantity
- timespan
- timing_distribution
- custom_distribution
- memory_distribution
- datetime
- uuid
- labeled_boolean
- labeled_string
- labeled_counter
description:
title: Description
description: |
**Required.**
A textual description of what this metric does, what it means, and its
edge cases or any other helpful information.
Descriptions may contain [markdown
syntax](https://www.markdownguide.org/basic-syntax/).
type: string
lifetime:
title: Lifetime
description: |
Defines the lifetime of the metric. It must be one of the following
values:
- `ping` (default): The metric is reset each time it is sent in a
ping.
- `user`: The metric contains a property that is part of the user's
profile and is never reset.
- `application`: The metric contains a property that is related to the
application, and is reset only at application restarts.
enum:
- ping
- user
- application
default: ping
send_in_pings:
title: Send in pings
description: |
Which pings the metric should be sent on. If not specified, the metric
is sent on the "default ping", which is the `events` ping for events,
and the `metrics` ping for everything else. Most metrics don't need to
specify this.
(There is an additional special value of `all-pings` for internal
Glean metrics only that is used to indicate that a metric may appear
in any ping.)
type: array
items:
$ref: "#/definitions/kebab_case"
default:
- default
notification_emails:
title: Notification emails
description: |
**Required.**
A list of email addresses to notify for important events with the
metric or when people with context or ownership for the metric need to
be contacted.
type: array
minItems: 1
items:
type: string
format: email
bugs:
title: Related bugs
description: |
**Required.**
A list of bug URLs (e.g. Bugzilla and Github) that are relevant to
this metric, e.g., tracking its original implementation or later
changes to it.
Using bug numbers alone is deprecated and will be an error in the
future. Each entry should be a full URL to the bug in its tracker.
type: array
minItems: 1
items:
anyOf:
- type: integer
- type: string
format: uri
data_reviews:
title: Review references
description: |
**Required.**
A list of URIs to any data collection reviews relevant to the metric.
type: array
items:
type: string
format: uri
disabled:
title: Disabled
description: |
If `true`, the metric is disabled, and any metric collection on it
will be silently ignored at runtime.
type: boolean
default: false
expires:
title: Expires
description: |
**Required.**
May be one of the following values:
- `<build date>`: An ISO date `yyyy-mm-dd` in UTC on which the
metric expires. For example, `2019-03-13`. This date is checked at
build time. Except in special cases, this form should be used so
that the metric automatically "sunsets" after a period of time.
- `never`: This metric never expires.
- `expired`: This metric is manually expired.
type: string
pattern: "(never)|(expired)|([0-9]{4}-[0-9]{2}-[0-9]{2})"
version:
title: Metric version
description: |
The version of the metric. A monotonically increasing value. If not
provided, defaults to 0.
time_unit:
title: Time unit
description: |
Specifies the unit that the metric will be stored and displayed in. If
not provided, it defaults to milliseconds. Time values are sent to the
backend as integers, so `time_unit`_ determines the maximum resolution
at which timespans are recorded. Times are always truncated, not
rounded, to the nearest time unit. For example, a measurement of 25 ns
will be returned as 0 ms if `time_unit` is `"millisecond"`.
Valid when `type`_ is `timespan`, `timing_distribution` or `datetime`.
enum:
- nanosecond
- microsecond
- millisecond
- second
- minute
- hour
- day
memory_unit:
title: Memory unit
description: |
The unit that the incoming memory size values are recorded in.
The units are the power-of-2 units, so "kilobyte" is correctly a
"kibibyte".
- kilobyte == 2^10 == 1,024 bytes
- megabyte == 2^20 == 1,048,576 bytes
- gigabyte == 2^30 == 1,073,741,824 bytes
Values are automatically converted to and transmitted as bytes.
Valid when `type`_ is `memory_distribution`.
enum:
- byte
- kilobyte
- megabyte
- gigabyte
labels:
title: Labels
description: |
A list of labels for a labeled metric. If provided, the labels are
enforced at run time, and recording to an unknown label is recorded
to the special label ``__other__``. If not provided, the labels
may be anything, but using too many unique labels will put some
labels in the special label ``__other__``.
Valid with any of the labeled metric types.
anyOf:
- type: array
uniqueItems: true
items:
$ref: "#/definitions/labeled_metric_id"
maxItems: 16
- type: "null"
extra_keys:
title: Extra keys
description: |
The acceptable keys on the "extra" object sent with events. This is an
object mapping the key to an object containing metadata about the key.
This metadata object has the following keys:
- `description`: **Required.** A description of the key.
Valid when `type`_ is `event`.
type: object
propertyNames:
$ref: "#/definitions/dotted_snake_case"
additionalProperties:
type: object
properties:
description:
type: string
required:
- description
default: {}
gecko_datapoint:
title: Gecko Datapoint
description: |
This is a Gecko-specific property. It is the name of the Gecko metric
to accumulate the data from, when using the Glean SDK in a product
using GeckoView. See bug 1566356 for more context.
type: string
range_min:
title: Range minimum
description: |
The minimum value of a custom distribution.
Valid when `type`_ is `custom_distribution`.
type: number
default: 1
range_max:
title: Range maximum
description: |
The maximum value of a custom distribution.
Required when `type`_ is `custom_distribution`.
type: number
bucket_count:
title: Bucket count
description: |
The number of buckets to include in a custom distribution.
Required when `type`_ is `custom_distribution`.
type: number
minimum: 1
maximum: 100
histogram_type:
title: Histogram type
description: |
The type of histogram bucketing to use:
- `linear`: The buckets are linearly spaced within the range.
- `exponential`: The buckets are spaced on a natural logarithmic scale,
so the smaller-valued buckets are narrower than the higher-valued
buckets.
Required when `type`_ is `custom_distribution`.
enum:
- linear
- exponential
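For intuition, exponentially spaced bucket boundaries can be derived by stepping uniformly in log space. A hedged sketch follows; the authoritative bucketing algorithm lives in the Glean SDK, not in this schema:

import math

def exponential_buckets(range_min, range_max, bucket_count):
    # Assumes range_min >= 1 and bucket_count >= 2. Equal steps in log
    # space make low-valued buckets narrower than high-valued ones.
    log_min, log_max = math.log(range_min), math.log(range_max)
    step = (log_max - log_min) / (bucket_count - 1)
    return [round(math.exp(log_min + i * step)) for i in range(bucket_count)]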
unit:
title: Unit
description: |
The unit of the metric, for metrics that don't already require a
meaningful unit, such as `time_unit`.
This is provided for informational purposes only and doesn't have any
effect on data collection.
type: string
no_lint:
title: Lint checks to skip
description: |
This parameter lists any lint checks to skip for this metric only.
type: array
items:
type: string
required:
- type
- bugs
- description
- notification_emails
- data_reviews
- expires
type: object
propertyNames:
anyOf:
- allOf:
- $ref: "#/definitions/dotted_snake_case"
- not:
description: "'pings' is reserved as a category name."
const: pings
- enum: ['$schema']
properties:
$schema:
type: string
format: uri
no_lint:
title: Lint checks to skip globally
description: |
This parameter lists any lint checks to skip for this whole file.
type: array
items:
type: string
additionalProperties:
type: object
propertyNames:
anyOf:
- $ref: "#/definitions/short_id"
additionalProperties:
allOf:
- $ref: "#/definitions/metric"
-
if:
properties:
type:
const: event
then:
properties:
lifetime:
description: |
Event metrics must have ping lifetime.
const: ping
- if:
not:
properties:
type:
enum:
- timing_distribution
- custom_distribution
- memory_distribution
- quantity
- boolean
- string
- labeled_counter
then:
properties:
gecko_datapoint:
description: |
`gecko_datapoint` is only allowed for `timing_distribution`,
`custom_distribution`, `memory_distribution`, `quantity`,
`boolean`, `string` and `labeled_counter`.
maxLength: 0
-
if:
properties:
type:
enum:
- custom_distribution
- quantity
then:
required:
- gecko_datapoint
description: |
`custom_distribution` and `quantity` are only allowed for Gecko
metrics.
-
if:
properties:
type:
const: custom_distribution
then:
required:
- range_max
- bucket_count
- histogram_type
description: |
`custom_distribution` is missing required parameters `range_max`,
`bucket_count` and `histogram_type`.
-
if:
properties:
type:
const: memory_distribution
then:
required:
- memory_unit
description: |
`memory_distribution` is missing required parameter `memory_unit`.
-
if:
properties:
type:
const: quantity
then:
required:
- unit
description: |
`quantity` is missing required parameter `unit`.

View file

@@ -0,0 +1,141 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
---
$schema: http://json-schema.org/draft-07/schema#
title: Pings
description: |
Schema for the pings.yaml files for Mozilla's Glean telemetry SDK.
The top-level of the `pings.yaml` file has a key defining the name of each
ping. The values contain metadata about that ping. Ping names must be
kebab-case per https://docs.telemetry.mozilla.org/cookbooks/new_ping.html
$id: moz://mozilla.org/schemas/glean/pings/1-0-0
definitions:
dotted_snake_case:
type: string
pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
maxLength: 40
kebab_case:
type: string
# Bug 1601270; we allow a few specific existing snake_cased ping names for now,
# but these special cases can be removed once the number of legacy clients
# sufficiently dwindles, likely in 2020H2.
pattern: "^[a-z][a-z0-9-]{0,29}$\
|^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
type: object
propertyNames:
allOf:
- anyOf:
- $ref: "#/definitions/kebab_case"
- enum: ['$schema']
- not:
enum: ['all-pings']
properties:
$schema:
type: string
format: uri
additionalProperties:
type: object
properties:
description:
title: Description
description: |
**Required.**
A textual description of the purpose of this ping and what it contains.
Descriptions may contain [markdown
syntax](https://www.markdownguide.org/basic-syntax/).
type: string
include_client_id:
title: Include client id
description: |
**Required.**
When `true`, include the `client_id` value in the ping.
type: boolean
send_if_empty:
title: Send if empty
description: |
When `false` a ping is sent only if it contains data (the default).
When `true` a ping is sent even if it contains no data.
type: boolean
notification_emails:
title: Notification emails
description: |
**Required.**
A list of email addresses to notify for important events with the
ping or when people with context or ownership for the ping need to
be contacted.
type: array
minItems: 1
items:
type: string
format: email
bugs:
title: Related bugs
description: |
**Required.**
A list of bugs (e.g. Bugzilla and Github) that are relevant to this
ping, e.g., tracking its original implementation or later changes to
it.
If a number, it is an ID to an issue in the default tracker (e.g.
Mozilla's Bugzilla instance). If a string, it must be a URI to a bug
page in a tracker.
type: array
minItems: 1
items:
anyOf:
- type: integer
- type: string
format: uri
data_reviews:
title: Review references
description: |
**Required.**
A list of URIs to any data collection reviews relevant to the ping.
type: array
items:
type: string
format: uri
reasons:
title: The reasons this ping can be sent.
description: |
A list of reasons that the ping might be triggered. Sent in the ping's
`ping_info.reason` field.
Specified as a mapping from reason codes (which are short strings), to
a textual description of the reason.
type: object
propertyNames:
type: string
maxLength: 30
additionalProperties:
type: string
required:
- description
- include_client_id
- bugs
- notification_emails
- data_reviews
additionalProperties: false

174
third_party/python/glean_parser/glean_parser/swift.py vendored Normal file
View file

@@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Outputter to generate Swift code for metrics.
"""
import enum
import json
from . import pings
from . import util
from collections import defaultdict
# An (incomplete) list of reserved keywords in Swift.
# These will be replaced in generated code by their escaped form.
SWIFT_RESERVED_NAMES = ["internal", "typealias"]
def swift_datatypes_filter(value):
"""
A Jinja2 filter that renders Swift literals.
Based on Python's JSONEncoder, but overrides:
- dicts to use `[key: value]`
- sets to use `[...]`
- enums to use the like-named Swift enum
"""
class SwiftEncoder(json.JSONEncoder):
def iterencode(self, value):
if isinstance(value, dict):
yield "["
first = True
for key, subvalue in value.items():
if not first:
yield ", "
yield from self.iterencode(key)
yield ": "
yield from self.iterencode(subvalue)
first = False
yield "]"
elif isinstance(value, enum.Enum):
yield ("." + util.camelize(value.name))
elif isinstance(value, set):
yield "["
first = True
for subvalue in sorted(list(value)):
if not first:
yield ", "
yield from self.iterencode(subvalue)
first = False
yield "]"
elif value is None:
yield "nil"
else:
yield from super().iterencode(value)
return "".join(SwiftEncoder().iterencode(value))
def type_name(obj):
"""
Returns the Swift type to use for a given metric or ping object.
"""
generate_enums = getattr(obj, "_generate_enums", [])
if len(generate_enums):
template_args = []
for member, suffix in generate_enums:
if len(getattr(obj, member)):
template_args.append(util.Camelize(obj.name) + suffix)
else:
if suffix == "Keys":
template_args.append("NoExtraKeys")
else:
template_args.append("No" + suffix)
return "{}<{}>".format(class_name(obj.type), ", ".join(template_args))
return class_name(obj.type)
def class_name(obj_type):
"""
Returns the Swift class name for a given metric or ping type.
"""
if obj_type == "ping":
return "Ping"
if obj_type.startswith("labeled_"):
obj_type = obj_type[8:]
return util.Camelize(obj_type) + "MetricType"
def variable_name(var):
"""
Returns a valid Swift variable name, escaping keywords if necessary.
"""
if var in SWIFT_RESERVED_NAMES:
return "`" + var + "`"
else:
return var
def output_swift(objs, output_dir, options={}):
"""
Given a tree of objects, output Swift code to `output_dir`.
:param objs: A tree of objects (metrics and pings) as returned from
`parser.parse_objects`.
:param output_dir: Path to an output directory to write to.
:param options: options dictionary, with the following optional keys:
- namespace: The namespace to generate metrics in
- glean_namespace: The namespace to import Glean from
- allow_reserved: When True, this is a Glean-internal build
"""
template = util.get_jinja2_template(
"swift.jinja2",
filters=(
("swift", swift_datatypes_filter),
("type_name", type_name),
("class_name", class_name),
("variable_name", variable_name),
),
)
# The object parameters to pass to constructors.
# **CAUTION**: This list needs to be in the order the type constructor expects them.
# The `test_order_of_fields` test checks that the generated code is valid.
# **DO NOT CHANGE THE ORDER OR ADD NEW FIELDS IN THE MIDDLE**
extra_args = [
"category",
"name",
"send_in_pings",
"lifetime",
"disabled",
"time_unit",
"allowed_extra_keys",
"reason_codes",
]
namespace = options.get("namespace", "GleanMetrics")
glean_namespace = options.get("glean_namespace", "Glean")
for category_key, category_val in objs.items():
filename = util.Camelize(category_key) + ".swift"
filepath = output_dir / filename
custom_pings = defaultdict()
for obj in category_val.values():
if isinstance(obj, pings.Ping):
custom_pings[obj.name] = obj
has_labeled_metrics = any(
getattr(metric, "labeled", False) for metric in category_val.values()
)
with filepath.open("w", encoding="utf-8") as fd:
fd.write(
template.render(
category_name=category_key,
objs=category_val,
extra_args=extra_args,
namespace=namespace,
glean_namespace=glean_namespace,
has_labeled_metrics=has_labeled_metrics,
is_ping_type=len(custom_pings) > 0,
allow_reserved=options.get("allow_reserved", False)
)
)
# Jinja2 squashes the final newline, so we explicitly add it
fd.write("\n")

View file

@@ -0,0 +1,124 @@
// -*- mode: kotlin -*-
/*
* AUTOGENERATED BY glean_parser. DO NOT EDIT.
*/
{# The rendered Kotlin is autogenerated, but this
Jinja2 template is not. Please file bugs! #}
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
@file:Suppress("PackageNaming", "MaxLineLength")
package {{ namespace }}
import {{ glean_namespace }}.private.BooleanMetricType // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.CounterMetricType // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.HistogramMetricBase // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.QuantityMetricType // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.StringMetricType // ktlint-disable import-ordering no-unused-imports
/*
* This class performs the mapping between Gecko metrics and Glean SDK
* metric types.
*/
internal object GleanGeckoMetricsMapping {
// Support exfiltration of Gecko histograms from products using both the
// Glean SDK and GeckoView. See bug 1566356 for more context.
@Suppress("UNUSED_PARAMETER")
fun getHistogram(geckoMetricName: String): HistogramMetricBase? {
{% if 'histograms' in gecko_metrics %}
return when (geckoMetricName) {
{% for category in gecko_metrics['histograms'].keys()|sort %}
// From {{ category|Camelize }}.kt
{% for metric in gecko_metrics['histograms'][category] %}
"{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
{% endfor %}
{%- endfor %}
else -> null
}
{% else %}
return null
{% endif %}
}
// Support exfiltration of Gecko categorical histograms from products using
// both the Glean SDK and GeckoView. See bug 1571740 for more context.
@Suppress("UNUSED_PARAMETER")
fun getCategoricalMetric(
geckoMetricName: String
): LabeledMetricType<CounterMetricType>? {
{% if 'categoricals' in gecko_metrics %}
return when (geckoMetricName) {
{% for category in gecko_metrics['categoricals'].keys()|sort %}
// From {{ category|Camelize }}.kt
{% for metric in gecko_metrics['categoricals'][category] %}
"{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
{% endfor %}
{%- endfor %}
else -> null
}
{% else %}
return null
{% endif %}
}
// Support exfiltration of Gecko boolean scalars from products using both the
// Glean SDK and GeckoView. See bug 1579365 for more context.
@Suppress("UNUSED_PARAMETER")
fun getBooleanScalar(geckoMetricName: String): BooleanMetricType? {
{% if 'boolean' in gecko_metrics %}
return when (geckoMetricName) {
{% for category in gecko_metrics['boolean'].keys()|sort %}
// From {{ category|Camelize }}.kt
{% for metric in gecko_metrics['boolean'][category] %}
"{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
{% endfor %}
{%- endfor %}
else -> null
}
{% else %}
return null
{% endif %}
}
// Support exfiltration of Gecko string scalars from products using both the
// Glean SDK and GeckoView. See bug 1579365 for more context.
@Suppress("UNUSED_PARAMETER")
fun getStringScalar(geckoMetricName: String): StringMetricType? {
{% if 'string' in gecko_metrics %}
return when (geckoMetricName) {
{% for category in gecko_metrics['string'].keys()|sort %}
// From {{ category|Camelize }}.kt
{% for metric in gecko_metrics['string'][category] %}
"{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
{% endfor %}
{%- endfor %}
else -> null
}
{% else %}
return null
{% endif %}
}
// Support exfiltration of Gecko quantity scalars from products using both the
// Glean SDK and GeckoView. See bug 1579365 for more context.
@Suppress("UNUSED_PARAMETER")
fun getQuantityScalar(geckoMetricName: String): QuantityMetricType? {
{% if 'quantity' in gecko_metrics %}
return when (geckoMetricName) {
{% for category in gecko_metrics['quantity'].keys()|sort %}
// From {{ category|Camelize }}.kt
{% for metric in gecko_metrics['quantity'][category] %}
"{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
{% endfor %}
{%- endfor %}
else -> null
}
{% else %}
return null
{% endif %}
}
}

View file

@@ -0,0 +1,81 @@
// -*- mode: kotlin -*-
/*
* AUTOGENERATED BY glean_parser. DO NOT EDIT.
*/
{# The rendered Kotlin is autogenerated, but this
Jinja2 template is not. Please file bugs! #}
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
{% macro obj_declaration(obj, suffix='', access='', lazy=False) %}
{% if (access != "private ") -%}
@get:JvmName("{{ obj.name|camelize }}{{ suffix }}")
{% endif -%}
{{ access }}val {{ obj.name|camelize }}{{ suffix }}: {{ obj|type_name }}{% if lazy %} by lazy { {%- else %} ={% endif %}
{{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
{{ arg_name|camelize }} = {{ obj[arg_name]|kotlin }}{{ "," if not loop.last }}
{% endfor %}
)
{% if lazy %} }{% endif %}{% endmacro %}
/* ktlint-disable no-blank-line-before-rbrace */
@file:Suppress("PackageNaming", "MaxLineLength")
package {{ namespace }}
import {{ glean_namespace }}.private.HistogramType // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.Lifetime // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.MemoryUnit // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.NoExtraKeys // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.NoReasonCodes // ktlint-disable import-ordering no-unused-imports
import {{ glean_namespace }}.private.TimeUnit // ktlint-disable import-ordering no-unused-imports
{% for obj_type in obj_types %}
import {{ glean_namespace }}.private.{{ obj_type }} // ktlint-disable import-ordering
{% endfor %}
{% if has_labeled_metrics %}
import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering
{% endif %}
internal object {{ category_name|Camelize }} {
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
@Suppress("ClassNaming", "EnumNaming")
enum class {{ obj.name|camelize }}{{ suffix }} {
{% for key in obj|attr(name) %}
{{ key|camelize }}{{ "," if not loop.last }}
{% endfor %}
}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
{% for obj in objs.values() %}
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
/**
* {{ obj.description|wordwrap() | replace('\n', '\n * ') }}
*/
val {{ obj.name|camelize }}: LabeledMetricType<{{ obj|type_name }}> by lazy {
LabeledMetricType(
category = {{ obj.category|kotlin }},
name = {{ obj.name|kotlin }},
subMetric = {{ obj.name|camelize }}Label,
disabled = {{ obj.is_disabled()|kotlin }},
lifetime = {{ obj.lifetime|kotlin }},
sendInPings = {{ obj.send_in_pings|kotlin }},
labels = {{ obj.labels|kotlin }}
)
}
{% else %}
/**
* {{ obj.description|wordwrap() | replace('\n', '\n * ') }}
*/
{{ obj_declaration(obj, lazy=obj.type != 'ping') }}
{% endif %}
{%- endfor %}
}

View file

@@ -0,0 +1,69 @@
<!-- AUTOGENERATED BY glean_parser. DO NOT EDIT. -->
{# The rendered markdown is autogenerated, but this
Jinja2 template is not. Please file bugs! #}
# Metrics
This document enumerates the metrics collected by {{ project_title }}.
This project may depend on other projects which also collect metrics.
This means you might have to go searching through the dependency tree to get a full picture of everything collected by this project.
# Pings
{% for ping_name in metrics_by_pings.keys()|sort %}
- [{{ ping_name }}]({{ '#' }}{{ ping_name|replace(" ","-") }})
{% endfor %}
{% for ping_name in metrics_by_pings.keys()|sort %}
{% raw %}##{% endraw %} {{ ping_name }}
{% if ping_name|ping_desc and ping_name|ping_desc|length > 0 %}
{{ ping_name|ping_desc }}
{% if ping_name|ping_docs|length > 0 %}
See the Glean SDK documentation for the [`{{ ping_name }}` ping]({{ ping_name|ping_docs }}).
{% endif %}
{% endif %}
{% if ping_name|ping_send_if_empty %}
This ping is sent if empty.
{% endif %}
{% if ping_name|ping_reasons %}
Reasons this ping may be sent:
{% for (reason, desc) in ping_name|ping_reasons|dictsort %}
- `{{ reason }}`: {{ desc|indent(6, indentfirst=False) }}
{% endfor %}
{% endif %}
{% if metrics_by_pings[ping_name] %}
The following metrics are added to the ping:
| Name | Type | Description | Data reviews | Extras | Expiration |
| --- | --- | --- | --- | --- | --- |
{% for metric in metrics_by_pings[ping_name] %}
| {{ metric.identifier() }} |
{{- '['}}{{ metric.type }}]({{ metric.type|metrics_docs }}) |
{{- metric.description|replace("\n", " ") }} |
{%- for data_review in metric.data_reviews %}
[{{ loop.index }}]({{ data_review }}){{ ", " if not loop.last }}
{%- endfor -%} |
{%- if metric|extra_info -%}
<ul>
{%- for property, desc in metric|extra_info %}
<li>{{ property }}{%- if desc is not none -%}: {{ desc|replace("\n", " ") }}{%- endif -%}</li>
{%- endfor -%}
</ul>
{%- endif -%} |
{{- metric.expires }} |
{% endfor %}
{% else %}
This ping contains no metrics.
{% endif %}
{% endfor %}
<!-- AUTOGENERATED BY glean_parser. DO NOT EDIT. -->
{# The rendered markdown is autogenerated, but this
Jinja2 template is not. Please file bugs! #}

View file

@@ -0,0 +1,105 @@
// -*- mode: Swift -*-
// AUTOGENERATED BY glean_parser. DO NOT EDIT.
{# The rendered Swift is autogenerated, but this
Jinja2 template is not. Please file bugs! #}
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
{% macro obj_declaration(obj, suffix='', access='') %}
{{ access }}static let {{ obj.name|camelize|variable_name }}{{ suffix }} = {{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
{{ arg_name|camelize }}: {{ obj[arg_name]|swift }}{{ "," if not loop.last }}
{% endfor %}
)
{% endmacro %}
{% if not allow_reserved %}
import {{ glean_namespace }}
{% endif %}
// swiftlint:disable superfluous_disable_command
// swiftlint:disable nesting
// swiftlint:disable line_length
// swiftlint:disable identifier_name
// swiftlint:disable force_try
extension {{ namespace }} {
{% if is_ping_type %}
class {{ category_name|Camelize }} {
public static let shared = {{ category_name|Camelize }}()
private init() {
// Intentionally left private, no external user can instantiate a new global object.
}
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
enum {{ obj.name|Camelize }}{{ suffix }}: Int, ReasonCodes {
{% for key in obj|attr(name) %}
case {{ key|camelize|variable_name }} = {{ loop.index-1 }}
{% endfor %}
public func index() -> Int {
return self.rawValue
}
}
{% endif %}
{% endfor %}
{% endif %}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
let {{ obj.name|camelize|variable_name }} = {{obj|type_name}}(
name: {{ obj.name|swift }},
includeClientId: {{obj.include_client_id|swift}},
sendIfEmpty: {{obj.send_if_empty|swift}},
reasonCodes: {{obj.reason_codes|swift}}
)
{% endfor %}
}
{% else %}
enum {{ category_name|Camelize }} {
{% for obj in objs.values() %}
{% if obj|attr("_generate_enums") %}
{% for name, suffix in obj["_generate_enums"] %}
{% if obj|attr(name)|length %}
enum {{ obj.name|Camelize }}{{ suffix }}: Int32, ExtraKeys {
{% for key in obj|attr(name) %}
case {{ key|camelize|variable_name }} = {{ loop.index-1 }}
{% endfor %}
public func index() -> Int32 {
return self.rawValue
}
}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
{% for obj in objs.values() %}
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
static let {{ obj.name|camelize|variable_name }} = try! LabeledMetricType<{{ obj|type_name }}>(
category: {{ obj.category|swift }},
name: {{ obj.name|swift }},
sendInPings: {{ obj.send_in_pings|swift }},
lifetime: {{ obj.lifetime|swift }},
disabled: {{ obj.is_disabled()|swift }},
subMetric: {{ obj.name|camelize }}Label,
labels: {{ obj.labels|swift }}
)
{% else %}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
{{ obj_declaration(obj) }}
{% endif %}
{% endfor %}
}
{% endif %}
}

101
third_party/python/glean_parser/glean_parser/translate.py vendored Normal file
View file

@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
High-level interface for translating `metrics.yaml` into other formats.
"""
from pathlib import Path
import os
import shutil
import sys
import tempfile
from . import lint
from . import parser
from . import kotlin
from . import markdown
from . import swift
from . import util
# Each outputter in the table has the following keys:
# - "output_func": the main function of the outputter, the one which
# does the actual translation.
# - "clear_output_dir": a flag to clear the target directory before moving there
# the generated files.
OUTPUTTERS = {
"kotlin": {
"output_func": kotlin.output_kotlin,
"clear_output_dir": True,
"extensions": ["*.kt"],
},
"markdown": {"output_func": markdown.output_markdown, "clear_output_dir": False},
"swift": {
"output_func": swift.output_swift,
"clear_output_dir": True,
"extensions": ["*.swift"],
},
}
def translate(input_filepaths, output_format, output_dir, options={}, parser_config={}):
"""
Translate the files in `input_filepaths` to the given `output_format` and
put the results in `output_dir`.
:param input_filepaths: list of paths to input metrics.yaml files
:param output_format: the name of the output format
:param output_dir: the path to the output directory
:param options: dictionary of options. The available options are backend
format specific.
:param parser_config: A dictionary of options that change parsing behavior.
See `parser.parse_metrics` for more info.
"""
if output_format not in OUTPUTTERS:
raise ValueError("Unknown output format '{}'".format(output_format))
all_objects = parser.parse_objects(input_filepaths, parser_config)
if util.report_validation_errors(all_objects):
return 1
if lint.lint_metrics(all_objects.value, parser_config):
print(
"NOTE: These warnings will become errors in a future release of Glean.",
file=sys.stderr,
)
# allow_reserved is also relevant to the translators, so copy it there
if parser_config.get("allow_reserved"):
options["allow_reserved"] = True
# Write everything out to a temporary directory, and then move it to the
# real directory, for transactional integrity.
with tempfile.TemporaryDirectory() as tempdir:
tempdir_path = Path(tempdir)
OUTPUTTERS[output_format]["output_func"](
all_objects.value, tempdir_path, options
)
if OUTPUTTERS[output_format]["clear_output_dir"]:
if output_dir.is_file():
output_dir.unlink()
elif output_dir.is_dir():
for extensions in OUTPUTTERS[output_format]["extensions"]:
for filepath in output_dir.glob(extensions):
filepath.unlink()
if len(list(output_dir.iterdir())):
print("Extra contents found in '{}'.".format(output_dir))
# We can't use shutil.copytree alone if the directory already exists.
# However, if it doesn't exist, make sure to create one otherwise
# shutil.copy will fail.
os.makedirs(str(output_dir), exist_ok=True)
for filename in tempdir_path.glob("*"):
shutil.copy(str(filename), str(output_dir))
return 0
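A minimal usage sketch for the function above (file and directory paths are hypothetical):

from pathlib import Path
from glean_parser import translate

# Returns 0 on success, 1 if the input files fail validation.
result = translate.translate(
    [Path("metrics.yaml")],
    "kotlin",
    Path("generated"),
    options={"namespace": "org.example.GleanMetrics"},
)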

376
third_party/python/glean_parser/glean_parser/util.py vendored Normal file
View file

@@ -0,0 +1,376 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import OrderedDict
import datetime
import functools
import json
from pathlib import Path
import sys
import textwrap
import urllib.request
import appdirs
import diskcache
import jinja2
import jsonschema
from jsonschema import _utils
import yaml
if sys.version_info < (3, 7):
import iso8601
TESTING_MODE = "pytest" in sys.modules
# Adapted from
# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
class _NoDatesSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
"""
Remove implicit resolvers for a particular tag
Takes care not to modify resolvers in super classes.
We want to load datetimes as strings, not dates, because we
go on to serialise as json which doesn't have the advanced types
of yaml, and leads to incompatibilities down the track.
"""
if "yaml_implicit_resolvers" not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [
(tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
]
# Since we use JSON schema to validate, and JSON schema doesn't support
# datetimes, we don't want the YAML loader to give us datetimes -- just
# strings.
_NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp")
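# Illustrative effect (not part of the original module): with the
# resolver removed, dates load as plain strings.
#
#   yaml.load("expires: 2019-03-13", Loader=_NoDatesSafeLoader)
#   # -> {'expires': '2019-03-13'} rather than a datetime.date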
if sys.version_info < (3, 7):
# In Python prior to 3.7, dictionary order is not preserved. However, we
# want the metrics to appear in the output in the same order as they are in
# the metrics.yaml file, so on earlier versions of Python we must use an
# OrderedDict object.
def ordered_yaml_load(stream):
class OrderedLoader(_NoDatesSafeLoader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping
)
return yaml.load(stream, OrderedLoader)
def ordered_yaml_dump(data, **kwargs):
class OrderedDumper(yaml.Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
)
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, Dumper=OrderedDumper, **kwargs)
else:
def ordered_yaml_load(stream):
return yaml.load(stream, Loader=_NoDatesSafeLoader)
def ordered_yaml_dump(data, **kwargs):
return yaml.dump(data, **kwargs)
def load_yaml_or_json(path, ordered_dict=False):
"""
Load the content from either a .json or .yaml file, based on the filename
extension.
:param path: `pathlib.Path` object
:rtype object: The tree of objects as a result of parsing the file.
:raises ValueError: The file is neither a .json, .yml or .yaml file.
"""
# If in py.test, support bits of literal JSON/YAML content
if TESTING_MODE and isinstance(path, dict):
return path
if not path.is_file():
return {}
if path.suffix == ".json":
with path.open("r") as fd:
return json.load(fd)
elif path.suffix in (".yml", ".yaml", ".yamlx"):
with path.open("r") as fd:
if ordered_dict:
return ordered_yaml_load(fd)
else:
return yaml.load(fd, Loader=_NoDatesSafeLoader)
else:
raise ValueError("Unknown file extension {}".format(path.suffix))
def ensure_list(value):
"""
Ensures that the value is a list. If it is anything but a list or tuple, a
list with a single element containing only value is returned.
"""
if not isinstance(value, (list, tuple)):
return [value]
return value
def to_camel_case(input, capitalize_first_letter):
"""
Convert the value to camelCase.
This additionally replaces any '.' or '-' with '_'. The first letter is capitalized
depending on `capitalize_first_letter`.
"""
sanitized_input = input.replace(".", "_").replace("-", "_")
# Filter out any empty token. This could happen due to leading '_' or
# consecutive '__'.
tokens = [s.capitalize() for s in sanitized_input.split("_") if len(s) != 0]
# If we're not meant to capitalize the first letter, then lowercase it.
if not capitalize_first_letter:
tokens[0] = tokens[0].lower()
# Finally, join the tokens back together.
return ''.join(tokens)
def camelize(value):
"""
Convert the value to camelCase (with a lower case first letter).
This is a thin wrapper around `to_camel_case` that handles dots and
dashes in addition to underscores.
"""
return to_camel_case(value, False)
def Camelize(value):
"""
Convert the value to CamelCase (with an upper case first letter).
This is a thin wrapper around `to_camel_case` that handles dots and
dashes in addition to underscores.
"""
return to_camel_case(value, True)
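# Illustrative examples (not part of the original module):
#
#   camelize("metrics.sample_string")  -> "metricsSampleString"
#   Camelize("timing-distribution")    -> "TimingDistribution"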
@functools.lru_cache()
def get_jinja2_template(template_name, filters=()):
"""
Get a Jinja2 template that ships with glean_parser.
The template has extra filters for camel-casing identifiers.
:param template_name: Name of a file in ``glean_parser/templates``
:param filters: tuple of 2-tuple. A tuple of (name, func) pairs defining
additional filters.
"""
env = jinja2.Environment(
loader=jinja2.PackageLoader("glean_parser", "templates"),
trim_blocks=True,
lstrip_blocks=True,
)
env.filters["camelize"] = camelize
env.filters["Camelize"] = Camelize
for filter_name, filter_func in filters:
env.filters[filter_name] = filter_func
return env.get_template(template_name)
def keep_value(f):
"""
Wrap a generator so the value it returns (rather than yields), will be
accessible on the .value attribute when the generator is exhausted.
"""
class ValueKeepingGenerator(object):
def __init__(self, g):
self.g = g
self.value = None
def __iter__(self):
self.value = yield from self.g
@functools.wraps(f)
def g(*args, **kwargs):
return ValueKeepingGenerator(f(*args, **kwargs))
return g
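# Illustrative usage (not part of the original module): capture a
# generator's return value while still iterating its yielded items.
#
#   @keep_value
#   def gen():
#       yield 1
#       yield 2
#       return "done"
#
#   g = gen()
#   list(g)   # -> [1, 2]
#   g.value   # -> "done"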
def get_null_resolver(schema):
"""
Returns a JSON Pointer resolver that does nothing.
This lets us handle the moz: URLs in our schemas.
"""
class NullResolver(jsonschema.RefResolver):
def resolve_remote(self, uri):
if uri in self.store:
return self.store[uri]
if uri == "":
return self.referrer
return NullResolver.from_schema(schema)
def fetch_remote_url(url, cache=True):
"""
Fetches the contents from an HTTP url or local file path, and optionally
caches it to disk.
"""
is_http = url.startswith("http")
if not is_http:
with open(url, "r", encoding="utf-8") as fd:
contents = fd.read()
return contents
if cache:
cache_dir = appdirs.user_cache_dir("glean_parser", "mozilla")
with diskcache.Cache(cache_dir) as dc:
if url in dc:
return dc[url]
contents = urllib.request.urlopen(url).read()
# On Python 3.5, urlopen does not handle the unicode decoding for us. This
# is ok because we control these files and we know they are in UTF-8,
# however, this wouldn't be correct in general.
if sys.version_info < (3, 6):
contents = contents.decode("utf8")
if cache:
with diskcache.Cache(cache_dir) as dc:
dc[url] = contents
return contents
_unset = _utils.Unset()
def pprint_validation_error(error):
"""
A version of jsonschema's ValidationError __str__ method that doesn't
include the schema fragment that failed. This makes the error messages
much more succinct.
It also shows any subschemas of anyOf/allOf that failed, if any (what
jsonschema calls "context").
"""
essential_for_verbose = (
error.validator,
error.validator_value,
error.instance,
error.schema,
)
if any(m is _unset for m in essential_for_verbose):
return textwrap.fill(error.message)
instance = error.instance
for path in list(error.relative_path)[::-1]:
if isinstance(path, str):
instance = {path: instance}
else:
instance = [instance]
yaml_instance = ordered_yaml_dump(instance, width=72, default_flow_style=False)
parts = ["```", yaml_instance.rstrip(), "```", "", textwrap.fill(error.message)]
if error.context:
parts.extend(
textwrap.fill(x.message, initial_indent=" ", subsequent_indent=" ")
for x in error.context
)
description = error.schema.get("description")
if description:
parts.extend(["", "Documentation for this node:", _utils.indent(description)])
return "\n".join(parts)
def format_error(filepath, header, content):
"""
Format a jsonschema validation error.
"""
if isinstance(filepath, Path):
filepath = filepath.resolve()
else:
filepath = "<string>"
if header:
return "{}: {}\n{}".format(filepath, header, _utils.indent(content))
else:
return "{}:\n{}".format(filepath, _utils.indent(content))
def is_expired(expires):
"""
Parses the `expires` field in a metric or ping and returns whether
the object should be considered expired.
"""
if expires == "never":
return False
elif expires == "expired":
return True
else:
try:
if sys.version_info < (3, 7):
date = iso8601.parse_date(expires).date()
else:
date = datetime.date.fromisoformat(expires)
except ValueError:
raise ValueError(
(
"Invalid expiration date '{}'. "
"Must be of the form yyyy-mm-dd in UTC."
).format(expires)
)
return date <= datetime.datetime.utcnow().date()
def validate_expires(expires):
"""
Raises ValueError if `expires` is not valid.
"""
if expires in ("never", "expired"):
return
if sys.version_info < (3, 7):
iso8601.parse_date(expires)
else:
datetime.date.fromisoformat(expires)
def report_validation_errors(all_objects):
"""
Report any validation errors found to the console.
"""
found_error = False
for error in all_objects:
found_error = True
print("=" * 78, file=sys.stderr)
print(error, file=sys.stderr)
return found_error

74
third_party/python/glean_parser/glean_parser/validate_ping.py vendored Normal file
View file

@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Validates the contents of a Glean ping against the schema.
"""
import functools
import io
import json
from pathlib import Path
import sys
import jsonschema
from . import util
ROOT_DIR = Path(__file__).parent
SCHEMAS_DIR = ROOT_DIR / "schemas"
@functools.lru_cache(maxsize=1)
def _get_ping_schema(schema_url):
contents = util.fetch_remote_url(schema_url)
return json.loads(contents)
def _validate_ping(ins, outs, schema_url):
schema = _get_ping_schema(schema_url)
resolver = util.get_null_resolver(schema)
document = json.load(ins)
validator_class = jsonschema.validators.validator_for(schema)
validator = validator_class(schema, resolver=resolver)
has_error = 0
for error in validator.iter_errors(document):
outs.write("=" * 76)
outs.write("\n")
outs.write(util.format_error("", "", util.pprint_validation_error(error)))
outs.write("\n")
has_error = 1
return has_error
def validate_ping(ins, outs=None, schema_url=None):
"""
Validates the contents of a Glean ping.
:param ins: Input stream or file path to the ping contents to validate
:param outs: Output stream to write errors to. (Defaults to stdout)
:param schema_url: HTTP URL or local filesystem path to Glean ping schema.
Defaults to the current version of the schema in
mozilla-pipeline-schemas.
:return: 1 if any errors occurred, otherwise 0.
"""
if schema_url is None:
raise TypeError("Missing required argument 'schema_url'")
if outs is None:
outs = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
if isinstance(ins, (str, bytes, Path)):
with open(ins, "r") as fd:
return _validate_ping(fd, outs, schema_url=schema_url)
else:
return _validate_ping(ins, outs, schema_url=schema_url)
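A minimal usage sketch for the entry point above (the schema URL shown is hypothetical):

from glean_parser import validate_ping

# Returns 1 if the ping fails schema validation, 0 otherwise.
exit_code = validate_ping.validate_ping(
    "ping.json",
    schema_url="https://example.org/schemas/glean/baseline.1.schema.json",
)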

View file

@@ -0,0 +1,13 @@
Copyright 2017-2019 Jason R. Coombs, Barry Warsaw
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -0,0 +1,65 @@
Metadata-Version: 2.1
Name: importlib-metadata
Version: 1.5.0
Summary: Read metadata from Python packages
Home-page: http://importlib-metadata.readthedocs.io/
Author: Barry Warsaw
Author-email: barry@python.org
License: Apache Software License
Platform: UNKNOWN
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Topic :: Software Development :: Libraries
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 2
Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
Requires-Dist: zipp (>=0.5)
Requires-Dist: pathlib2 ; python_version < "3"
Requires-Dist: contextlib2 ; python_version < "3"
Requires-Dist: configparser (>=3.5) ; python_version < "3"
Provides-Extra: docs
Requires-Dist: sphinx ; extra == 'docs'
Requires-Dist: rst.linker ; extra == 'docs'
Provides-Extra: testing
Requires-Dist: packaging ; extra == 'testing'
Requires-Dist: importlib-resources ; (python_version < "3.7") and extra == 'testing'
=========================
``importlib_metadata``
=========================
``importlib_metadata`` is a library to access the metadata for a Python
package. It is intended to be ported to Python 3.8.
Usage
=====
See the `online documentation <https://importlib_metadata.readthedocs.io/>`_
for usage details.
`Finder authors
<https://docs.python.org/3/reference/import.html#finders-and-loaders>`_ can
also add support for custom package installers. See the above documentation
for details.
Caveats
=======
This project primarily supports third-party packages installed by PyPA
tools (or other conforming packages). It does not support:
- Packages in the stdlib.
- Packages installed without metadata.
Project details
===============
* Project home: https://gitlab.com/python-devs/importlib_metadata
* Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
* Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
* Documentation: http://importlib_metadata.readthedocs.io/

View file

@@ -0,0 +1,21 @@
importlib_metadata/__init__.py,sha256=09MTlbzRH9XUpar5uODOPdevOQ0HgR5DJsapV32I-DY,18117
importlib_metadata/_compat.py,sha256=wnOChfVj2Vx9gSQNe8BF5Tddy1VDxzDfsC5iyV4I8_0,3884
importlib_metadata/docs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_metadata/docs/changelog.rst,sha256=Ikau3lhoqOcqeMEnRmtrufO61ZTsyDtVYdfWRggonwA,7755
importlib_metadata/docs/conf.py,sha256=m-b6Mju5gFkpSHh-lyJ4iwqf_8t4LjYYFRumtutQSZc,5578
importlib_metadata/docs/index.rst,sha256=rbXrDkLAKLIDccqME5u9CCMEfMKprqzQOkIOuwOnfz4,1907
importlib_metadata/docs/using.rst,sha256=tlh7M8y0hIRB0cYIflhVFQtdQSfm-Q4GE1luXCU4lIY,9286
importlib_metadata/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_metadata/tests/fixtures.py,sha256=XescaYiWeK6sOwoP8DckmRjeGKg-eaISRY49gpyWxGY,5329
importlib_metadata/tests/test_api.py,sha256=YMAGTsRENrtvpw2CSLmRndJMBeT4q_M0GSe-QsnnMZ4,5544
importlib_metadata/tests/test_integration.py,sha256=vhbAi9zmaOa8b0OWSZGETWpn5542wXabfDVPd2lcIaY,1275
importlib_metadata/tests/test_main.py,sha256=n1gzecrIaoHb5mup8pY-YT74LwwEy5LeNWkb7CZVxjY,7983
importlib_metadata/tests/test_zip.py,sha256=qG3IquiTFLSrUtpxEJblqiUtgEcOTfjU2yM35REk0fo,2372
importlib_metadata/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_metadata/tests/data/example-21.12-py3-none-any.whl,sha256=I-kYufETid-tDYyR8f1OFJ3t5u_Io23k0cbQxJTUN4I,1455
importlib_metadata/tests/data/example-21.12-py3.6.egg,sha256=-EeugFAijkdUO9xyQHTZkQwZoFXK0_QxICBj6R5AAJo,1497
importlib_metadata-1.5.0.dist-info/LICENSE,sha256=wNe6dAchmJ1VvVB8D9oTc-gHHadCuaSBAev36sYEM6U,571
importlib_metadata-1.5.0.dist-info/METADATA,sha256=puK9j4_6OcxogVH7AvbjHxugq8e51AtHQb-UKkW-6cM,2093
importlib_metadata-1.5.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
importlib_metadata-1.5.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19
importlib_metadata-1.5.0.dist-info/RECORD,,

View file

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.33.6)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@@ -0,0 +1 @@
importlib_metadata

View file

@@ -0,0 +1,591 @@
from __future__ import unicode_literals, absolute_import
import io
import os
import re
import abc
import csv
import sys
import zipp
import operator
import functools
import itertools
import posixpath
import collections
from ._compat import (
install,
NullFinder,
ConfigParser,
suppress,
map,
FileNotFoundError,
IsADirectoryError,
NotADirectoryError,
PermissionError,
pathlib,
ModuleNotFoundError,
MetaPathFinder,
email_message_from_string,
PyPy_repr,
)
from importlib import import_module
from itertools import starmap
__metaclass__ = type
__all__ = [
'Distribution',
'DistributionFinder',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
class EntryPoint(
PyPy_repr,
collections.namedtuple('EntryPointBase', 'name value group')):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@classmethod
def _from_config(cls, config):
return [
cls(name, value, group)
for group in config.sections()
for name, value in config.items(group)
]
@classmethod
def _from_text(cls, text):
config = ConfigParser(delimiters='=')
# case sensitive: https://stackoverflow.com/q/1611799/812183
config.optionxform = str
try:
config.read_string(text)
except AttributeError: # pragma: nocover
# Python 2 has no read_string
config.readfp(io.StringIO(text))
return EntryPoint._from_config(config)
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints easily.
"""
return iter((self.name, self))
def __reduce__(self):
return (
self.__class__,
(self.name, self.value, self.group),
)
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
class Distribution:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(dists, None)
if dist is not None:
return dist
else:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context)
for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None)
for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self):
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return email_message_from_string(text)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoint._from_text(self.read_text('entry_points.txt'))
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
file_lines = self._read_files_distinfo() or self._read_files_egginfo()
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
return file_lines and list(starmap(make_file, csv.reader(file_lines)))
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
section_pairs = cls._read_sections(source.splitlines())
sections = {
section: list(map(operator.itemgetter('line'), results))
for section, results in
itertools.groupby(section_pairs, operator.itemgetter('section'))
}
return cls._convert_egg_info_reqs_to_simple_reqs(sections)
@staticmethod
def _read_sections(lines):
section = None
for line in filter(None, lines):
section_match = re.match(r'\[(.*)\]$', line)
if section_match:
section = section_match.group(1)
continue
yield locals()
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and 'extra == "{name}"'.format(name=name)
def parse_condition(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = '({markers})'.format(markers=markers)
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
for section, deps in sections.items():
for dep in deps:
yield dep + parse_condition(section)
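# Illustrative conversion (not part of the original module):
#
#   sections = {'docs': ['sphinx'], ':python_version < "3"': ['pathlib2']}
#   list(Distribution._convert_egg_info_reqs_to_simple_reqs(sections))
#   # -> ['sphinx; extra == "docs"', 'pathlib2; python_version < "3"']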
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
The path that a distribution finder should search.
Typically refers to Python package paths and defaults
to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
"""
def __init__(self, root):
self.root = root
self.base = os.path.basename(root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
return (
posixpath.split(child)[0]
for child in names
)
def is_egg(self, search):
base = self.base
return (
base == search.versionless_egg_name
or base.startswith(search.prefix)
and base.endswith('.egg'))
def search(self, name):
for child in self.children():
n_low = child.lower()
if (n_low in name.exact_matches
or n_low.startswith(name.prefix)
and n_low.endswith(name.suffixes)
# legacy case:
or self.is_egg(name) and n_low == 'egg-info'):
yield self.joinpath(child)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = ''
prefix = ''
suffixes = '.dist-info', '.egg-info'
exact_matches = [''][:0]
versionless_egg_name = ''
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = name.lower().replace('-', '_')
self.prefix = self.normalized + '-'
self.exact_matches = [
self.normalized + suffix for suffix in self.suffixes]
self.versionless_egg_name = self.normalized + '.egg'
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
return itertools.chain.from_iterable(
path.search(Prepared(name))
for path in map(FastPath, paths)
)
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
:param path: A pathlib.Path or similar object supporting
.joinpath(), __div__, .parent, and .read_text().
"""
self._path = path
def read_text(self, filename):
with suppress(FileNotFoundError, IsADirectoryError, KeyError,
NotADirectoryError, PermissionError):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
def distribution(distribution_name):
    """Get the ``Distribution`` instance for the named package.

    :param distribution_name: The name of the distribution package as a string.
    :return: A ``Distribution`` instance (or subclass thereof).
    """
    return Distribution.from_name(distribution_name)


def distributions(**kwargs):
    """Get all ``Distribution`` instances in the current environment.

    :return: An iterable of ``Distribution`` instances.
    """
    return Distribution.discover(**kwargs)


def metadata(distribution_name):
    """Get the metadata for the named package.

    :param distribution_name: The name of the distribution package to query.
    :return: An email.Message containing the parsed metadata.
    """
    return Distribution.from_name(distribution_name).metadata


def version(distribution_name):
    """Get the version string for the named package.

    :param distribution_name: The name of the distribution package to query.
    :return: The version string for the package as defined in the package's
        "Version" metadata key.
    """
    return distribution(distribution_name).version


def entry_points():
    """Return EntryPoint objects for all installed packages.

    :return: EntryPoint objects for all installed packages.
    """
    eps = itertools.chain.from_iterable(
        dist.entry_points for dist in distributions())
    by_group = operator.attrgetter('group')
    # groupby only groups adjacent items, so sort by group first.
    ordered = sorted(eps, key=by_group)
    grouped = itertools.groupby(ordered, by_group)
    return {
        group: tuple(eps)
        for group, eps in grouped
    }


def files(distribution_name):
    """Return a list of files for the named package.

    :param distribution_name: The name of the distribution package to query.
    :return: List of files composing the distribution.
    """
    return distribution(distribution_name).files


def requires(distribution_name):
    """
    Return a list of requirements for the named package.

    :return: A list of requirements, suitable for
        packaging.requirement.Requirement.
    """
    return distribution(distribution_name).requires


__version__ = version(__name__)
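# A short usage sketch of the functional API above (assumes some distribution
# such as 'wheel' is installed; values vary by environment):
#
#     import importlib_metadata
#
#     importlib_metadata.version('wheel')                  # e.g. '0.32.3'
#     importlib_metadata.metadata('wheel')['Summary']
#     # entry_points() returns a dict of EntryPoint tuples keyed by group:
#     for ep in importlib_metadata.entry_points().get('console_scripts', ()):
#         print(ep.name, ep.value)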

@@ -0,0 +1,131 @@
from __future__ import absolute_import

import io
import abc
import sys
import email

if sys.version_info > (3,):  # pragma: nocover
    import builtins
    from configparser import ConfigParser
    from contextlib import suppress
    FileNotFoundError = builtins.FileNotFoundError
    IsADirectoryError = builtins.IsADirectoryError
    NotADirectoryError = builtins.NotADirectoryError
    PermissionError = builtins.PermissionError
    map = builtins.map
else:  # pragma: nocover
    from backports.configparser import ConfigParser
    from itertools import imap as map  # type: ignore
    from contextlib2 import suppress  # noqa
    FileNotFoundError = IOError, OSError
    IsADirectoryError = IOError, OSError
    NotADirectoryError = IOError, OSError
    PermissionError = IOError, OSError

if sys.version_info > (3, 5):  # pragma: nocover
    import pathlib
else:  # pragma: nocover
    import pathlib2 as pathlib

try:
    # ModuleNotFoundError is a builtin on Python 3.6+; the original aliased
    # builtins.FileNotFoundError here, which is unrelated to import failures.
    ModuleNotFoundError = builtins.ModuleNotFoundError
except (NameError, AttributeError):  # pragma: nocover
    ModuleNotFoundError = ImportError  # type: ignore
if sys.version_info >= (3,):  # pragma: nocover
    from importlib.abc import MetaPathFinder
else:  # pragma: nocover
    class MetaPathFinder(object):
        __metaclass__ = abc.ABCMeta


# Make all classes defined in this module new-style on Python 2.
__metaclass__ = type


__all__ = [
    'install', 'NullFinder', 'MetaPathFinder', 'ModuleNotFoundError',
    'pathlib', 'ConfigParser', 'map', 'suppress', 'FileNotFoundError',
    'NotADirectoryError', 'email_message_from_string',
]
def install(cls):
    """
    Class decorator for installation on sys.meta_path.

    Adds the backport DistributionFinder to sys.meta_path and
    attempts to disable the finder functionality of the stdlib
    DistributionFinder.
    """
    sys.meta_path.append(cls())
    disable_stdlib_finder()
    return cls


def disable_stdlib_finder():
    """
    Give the backport primacy for discovering path-based distributions
    by monkey-patching the stdlib finder.

    See #91 for background on the rationale for this sketchy
    behavior.
    """
    def matches(finder):
        return (
            getattr(finder, '__module__', None) == '_frozen_importlib_external'
            and hasattr(finder, 'find_distributions')
        )
    for finder in filter(matches, sys.meta_path):  # pragma: nocover
        del finder.find_distributions
class NullFinder:
    """
    A "Finder" (aka "MetaPathFinder") that never finds any modules,
    but may find distributions.
    """
    @staticmethod
    def find_spec(*args, **kwargs):
        return None

    # In Python 2, the import system requires finders
    # to have a find_module() method, but this usage
    # is deprecated in Python 3 in favor of find_spec().
    # For the purposes of this finder (i.e. being present
    # on sys.meta_path but having no other import
    # system functionality), the two methods are identical.
    find_module = find_spec
def py2_message_from_string(text):  # nocoverpy3
    # Work around https://bugs.python.org/issue25545 where
    # email.message_from_string cannot handle Unicode on Python 2.
    io_buffer = io.StringIO(text)
    return email.message_from_file(io_buffer)


email_message_from_string = (
    py2_message_from_string
    if sys.version_info < (3,) else
    email.message_from_string
)


class PyPy_repr:
    """
    Override repr for EntryPoint objects on PyPy to avoid __iter__ access.
    Ref #97, #102.
    """
    affected = hasattr(sys, 'pypy_version_info')

    def __compat_repr__(self):  # pragma: nocover
        def make_param(name):
            value = getattr(self, name)
            return '{name}={value!r}'.format(**locals())
        params = ', '.join(map(make_param, self._fields))
        return 'EntryPoint({params})'.format(**locals())

    if affected:  # pragma: nocover
        __repr__ = __compat_repr__

    del affected

@@ -0,0 +1,257 @@
=========================
importlib_metadata NEWS
=========================
v1.5.0
======
* Additional performance optimizations in FastPath now
  save an additional 20% on a typical call.
* Correct for issue where PyOxidizer finder has no
``__module__`` attribute. Closes #110.
v1.4.0
======
* Through careful optimization, ``distribution()`` is
3-4x faster. Thanks to Antony Lee for the
contribution. Closes #95.
* When searching through ``sys.path``, if any error
occurs attempting to list a path entry, that entry
is skipped, making the system much more lenient
to errors. Closes #94.
v1.3.0
======
* Improve custom finders documentation. Closes #105.
v1.2.0
======
* Once again, drop support for Python 3.4. Ref #104.
v1.1.3
======
* Restored support for Python 3.4 due to improper version
compatibility declarations in the v1.1.0 and v1.1.1
releases. Closes #104.
v1.1.2
======
* Repaired project metadata to correctly declare the
``python_requires`` directive. Closes #103.
v1.1.1
======
* Fixed ``repr(EntryPoint)`` on PyPy 3 also. Closes #102.
v1.1.0
======
* Dropped support for Python 3.4.
* EntryPoints are now pickleable. Closes #96.
* Fixed ``repr(EntryPoint)`` on PyPy 2. Closes #97.
v1.0.0
======
* Project adopts semver for versioning.
* Removed compatibility shim introduced in 0.23.
* For better compatibility with the stdlib implementation and to
avoid the same distributions being discovered by the stdlib and
backport implementations, the backport now disables the
stdlib DistributionFinder during initialization (import time).
Closes #91 and closes #100.
0.23
====
* Added a compatibility shim to prevent failures on beta releases
of Python before the signature changed to accept the
"context" parameter on find_distributions. This workaround
will have a limited lifespan, not to extend beyond release of
Python 3.8 final.
0.22
====
* Renamed ``package`` parameter to ``distribution_name``
as `recommended <https://bugs.python.org/issue34632#msg349423>`_
in the following functions: ``distribution``, ``metadata``,
``version``, ``files``, and ``requires``. This
backward-incompatible change is expected to have little impact
as these functions are assumed to be primarily used with
positional parameters.
0.21
====
* ``importlib.metadata`` now exposes the ``DistributionFinder``
metaclass and references it in the docs for extending the
search algorithm.
* Add ``Distribution.at`` for constructing a Distribution object
from a known metadata directory on the file system. Closes #80.
* Distribution finders now receive a context object that
supplies ``.path`` and ``.name`` properties. This change
introduces a fundamental backward incompatibility for
any projects implementing a ``find_distributions`` method
on a ``MetaPathFinder``. This new layer of abstraction
allows this context to be supplied directly or constructed
on demand and opens the opportunity for a
``find_distributions`` method to solicit additional
context from the caller. Closes #85.
0.20
====
* Clarify in the docs that calls to ``.files`` could return
``None`` when the metadata is not present. Closes #69.
* Return all requirements and not just the first for dist-info
packages. Closes #67.
0.19
====
* Restrain over-eager egg metadata resolution.
* Add support for entry points with colons in the name. Closes #75.
0.18
====
* Parse entry points case sensitively. Closes #68
* Add a version constraint on the backport configparser package. Closes #66
0.17
====
* Fix a permission problem in the tests on Windows.
0.16
====
* Don't crash if there exists an EGG-INFO directory on sys.path.
0.15
====
* Fix documentation.
0.14
====
* Removed ``local_distribution`` function from the API.
**This backward-incompatible change removes this
behavior summarily**. Projects should remove their
reliance on this behavior. A replacement behavior is
under review in the `pep517 project
<https://github.com/pypa/pep517>`_. Closes #42.
0.13
====
* Update docstrings to match PEP 8. Closes #63.
* Merged modules into one module. Closes #62.
0.12
====
* Add support for eggs. !65; Closes #19.
0.11
====
* Support generic zip files (not just wheels). Closes #59
* Support zip files with multiple distributions in them. Closes #60
* Fully expose the public API in ``importlib_metadata.__all__``.
0.10
====
* The ``Distribution`` ABC is now officially part of the public API.
Closes #37.
* Fixed support for older single file egg-info formats. Closes #43.
* Fixed a testing bug when ``$CWD`` has spaces in the path. Closes #50.
* Add Python 3.8 to the ``tox`` testing matrix.
0.9
===
* Fixed issue where entry points without an attribute would raise an
Exception. Closes #40.
* Removed unused ``name`` parameter from ``entry_points()``. Closes #44.
* ``DistributionFinder`` classes must now be instantiated before
being placed on ``sys.meta_path``.
0.8
===
* This library can now discover/enumerate all installed packages. **This
backward-incompatible change alters the protocol finders must
implement to support distribution package discovery.** Closes #24.
* The signature of ``find_distributions()`` on custom installer finders
should now accept two parameters, ``name`` and ``path`` and
these parameters must supply defaults.
* The ``entry_points()`` method no longer accepts a package name
but instead returns all entry points in a dictionary keyed by the
``EntryPoint.group``. The ``resolve`` method has been removed. Instead,
call ``EntryPoint.load()``, which has the same semantics as
``pkg_resources`` and ``entrypoints``. **This is a backward incompatible
change.**
* Metadata is now always returned as Unicode text regardless of
Python version. Closes #29.
* This library can now discover metadata for a 'local' package (found
in the current-working directory). Closes #27.
* Added ``files()`` function for resolving files from a distribution.
* Added a new ``requires()`` function, which returns the requirements
for a package suitable for parsing by
``packaging.requirements.Requirement``. Closes #18.
* The top-level ``read_text()`` function has been removed. Use
``PackagePath.read_text()`` on instances returned by the ``files()``
function. **This is a backward incompatible change.**
* Release dates are now automatically injected into the changelog
based on SCM tags.
0.7
===
* Fixed issue where packages with dashes in their names would
not be discovered. Closes #21.
* Distribution lookup is now case-insensitive. Closes #20.
* Wheel distributions can no longer be discovered by their module
name. Like Path distributions, they must be indicated by their
distribution package name.
0.6
===
* Removed ``importlib_metadata.distribution`` function. Now
the public interface is primarily the utility functions exposed
in ``importlib_metadata.__all__``. Closes #14.
* Added two new utility functions ``read_text`` and
``metadata``.
0.5
===
* Updated README and removed details about Distribution
class, now considered private. Closes #15.
* Added test suite support for Python 3.4+.
* Fixed SyntaxErrors on Python 3.4 and 3.5. !12
* Fixed errors on Windows joining Path elements. !15
0.4
===
* Housekeeping.
0.3
===
* Added usage documentation. Closes #8
* Add support for getting metadata from wheels on ``sys.path``. Closes #9
0.2
===
* Added ``importlib_metadata.entry_points()``. Closes #1
* Added ``importlib_metadata.resolve()``. Closes #12
* Add support for Python 2.7. Closes #4
0.1
===
* Initial release.
..
Local Variables:
mode: change-log-mode
indent-tabs-mode: nil
sentence-end-double-space: t
fill-column: 78
coding: utf-8
End:

@@ -0,0 +1,185 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# importlib_metadata documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 30 10:21:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'rst.linker',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'importlib_metadata'
copyright = '2017-2019, Jason R. Coombs, Barry Warsaw'
author = 'Jason R. Coombs, Barry Warsaw'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'importlib_metadatadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'importlib_metadata.tex',
'importlib\\_metadata Documentation',
'Brett Cannon, Barry Warsaw', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
author, 'importlib_metadata', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'importlib_resources': (
'https://importlib-resources.readthedocs.io/en/latest/', None
),
}
# For rst.linker, inject release dates into changelog.rst
link_files = {
'changelog.rst': dict(
replace=[
dict(
pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
with_scm='{text}\n{rev[timestamp]:%Y-%m-%d}\n\n',
),
],
),
}

@@ -0,0 +1,50 @@
===============================
Welcome to importlib_metadata
===============================
``importlib_metadata`` is a library which provides an API for accessing an
installed package's metadata (see :pep:`566`), such as its entry points or its top-level
name. This functionality intends to replace most uses of ``pkg_resources``
`entry point API`_ and `metadata API`_. Along with :mod:`importlib.resources` in
Python 3.7 and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older
versions of Python), this can eliminate the need to use the older and less
efficient ``pkg_resources`` package.
``importlib_metadata`` is a backport of Python 3.8's standard library
:doc:`importlib.metadata <library/importlib.metadata>` module for Python 2.7, and 3.4 through 3.7. Users of
Python 3.8 and beyond are encouraged to use the standard library module.
When imported on Python 3.8 and later, ``importlib_metadata`` replaces the
DistributionFinder behavior from the stdlib, but leaves the API intact.
Developers looking for detailed API descriptions should refer to the Python
3.8 standard library documentation.
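On interpreters older than 3.8, a common pattern (used, for example, by
``jsonschema`` elsewhere in this tree) is to prefer the stdlib module and
fall back to this backport::

    try:
        from importlib import metadata  # Python 3.8+
    except ImportError:
        import importlib_metadata as metadata  # backport for older Pythons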
The documentation here includes a general :ref:`usage <using>` guide.
.. toctree::
   :maxdepth: 2
   :caption: Contents:

   using.rst
   changelog (links).rst
Project details
===============
* Project home: https://gitlab.com/python-devs/importlib_metadata
* Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
* Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
* Documentation: http://importlib_metadata.readthedocs.io/
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api

@@ -0,0 +1,252 @@
.. _using:
=================================
Using :mod:`!importlib_metadata`
=================================
``importlib_metadata`` is a library that provides for access to installed
package metadata. Built in part on Python's import system, this library
intends to replace similar functionality in the `entry point
API`_ and `metadata API`_ of ``pkg_resources``. Along with
:mod:`importlib.resources` in Python 3.7
and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older versions of
Python), this can eliminate the need to use the older and less efficient
``pkg_resources`` package.
By "installed package" we generally mean a third-party package installed into
Python's ``site-packages`` directory via tools such as `pip
<https://pypi.org/project/pip/>`_. Specifically,
it means a package with either a discoverable ``dist-info`` or ``egg-info``
directory, and metadata defined by :pep:`566` or its older specifications.
By default, package metadata can live on the file system or in zip archives on
:data:`sys.path`. Through an extension mechanism, the metadata can live almost
anywhere.
Overview
========
Let's say you wanted to get the version string for a package you've installed
using ``pip``. We start by creating a virtual environment and installing
something into it::
$ python3 -m venv example
$ source example/bin/activate
(example) $ pip install importlib_metadata
(example) $ pip install wheel
You can get the version string for ``wheel`` by running the following::
(example) $ python
>>> from importlib_metadata import version
>>> version('wheel')
'0.32.3'
You can also get the set of entry points keyed by group, such as
``console_scripts``, ``distutils.commands`` and others. Each group contains a
sequence of :ref:`EntryPoint <entry-points>` objects.
You can get the :ref:`metadata for a distribution <metadata>`::
>>> list(metadata('wheel'))
['Metadata-Version', 'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Project-URL', 'Project-URL', 'Project-URL', 'Keywords', 'Platform', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Requires-Python', 'Provides-Extra', 'Requires-Dist', 'Requires-Dist']
You can also get a :ref:`distribution's version number <version>`, list its
:ref:`constituent files <files>`, and get a list of the distribution's
:ref:`requirements`.
Functional API
==============
This package provides the following functionality via its public API.
.. _entry-points:
Entry points
------------
The ``entry_points()`` function returns a dictionary of all entry points,
keyed by group. Entry points are represented by ``EntryPoint`` instances;
each ``EntryPoint`` has ``.name``, ``.group``, and ``.value`` attributes and
a ``.load()`` method to resolve the value::
>>> eps = entry_points()
>>> list(eps)
['console_scripts', 'distutils.commands', 'distutils.setup_keywords', 'egg_info.writers', 'setuptools.installation']
>>> scripts = eps['console_scripts']
>>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0]
>>> wheel
EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts')
>>> main = wheel.load()
>>> main
<function main at 0x103528488>
The ``group`` and ``name`` are arbitrary values defined by the package author
and usually a client will wish to resolve all entry points for a particular
group. Read `the setuptools docs
<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
for more information on entry points, their definition, and usage.
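A common pattern is to iterate over one group and load each plugin; a minimal
sketch (the ``myapp.plugins`` group name is hypothetical)::

    from importlib_metadata import entry_points

    # entry_points() returns a dict of EntryPoint tuples keyed by group.
    for ep in entry_points().get('myapp.plugins', ()):
        plugin = ep.load()
        print(ep.name, '->', plugin)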
.. _metadata:
Distribution metadata
---------------------
Every distribution includes some metadata, which you can extract using the
``metadata()`` function::
>>> wheel_metadata = metadata('wheel')
The keys of the returned data structure [#f1]_ name the metadata keywords, and
their values are returned unparsed from the distribution metadata::
>>> wheel_metadata['Requires-Python']
'>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
.. _version:
Distribution versions
---------------------
The ``version()`` function is the quickest way to get a distribution's version
number, as a string::
>>> version('wheel')
'0.32.3'
.. _files:
Distribution files
------------------
You can also get the full set of files contained within a distribution. The
``files()`` function takes a distribution package name and returns all of the
files installed by this distribution. Each file object returned is a
``PackagePath``, a :class:`pathlib.Path` derived object with additional ``dist``,
``size``, and ``hash`` properties as indicated by the metadata. For example::
>>> util = [p for p in files('wheel') if 'util.py' in str(p)][0]
>>> util
PackagePath('wheel/util.py')
>>> util.size
859
>>> util.dist
<importlib_metadata._hooks.PathDistribution object at 0x101e0cef0>
>>> util.hash
<FileHash mode: sha256 value: bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI>
Once you have the file, you can also read its contents::
>>> print(util.read_text())
import base64
import sys
...
def as_bytes(s):
if isinstance(s, text_type):
return s.encode('utf-8')
return s
In the case where the metadata file listing files
(RECORD or SOURCES.txt) is missing, ``files()`` will
return ``None``. The caller may wish to wrap calls to
``files()`` in `always_iterable
<https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.always_iterable>`_
or otherwise guard against this condition if the target
distribution is not known to have the metadata present.
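Until then, a small guard keeps callers simple; a minimal sketch (the helper
name is illustrative)::

    from importlib_metadata import files

    def iter_dist_files(dist_name):
        # files() returns None when RECORD/SOURCES.txt is missing.
        listed = files(dist_name)
        return listed if listed is not None else []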
.. _requirements:
Distribution requirements
-------------------------
To get the full set of requirements for a distribution, use the ``requires()``
function::
>>> requires('wheel')
["pytest (>=3.0.0) ; extra == 'test'", "pytest-cov ; extra == 'test'"]
Distributions
=============
While the above API is the most common and convenient usage, you can get all
of that information from the ``Distribution`` class. A ``Distribution`` is an
abstract object that represents the metadata for a Python package. You can
get the ``Distribution`` instance::
>>> from importlib_metadata import distribution
>>> dist = distribution('wheel')
Thus, an alternative way to get the version number is through the
``Distribution`` instance::
>>> dist.version
'0.32.3'
There are all kinds of additional metadata available on the ``Distribution``
instance::
>>> dist.metadata['Requires-Python']
'>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
>>> dist.metadata['License']
'MIT'
The full set of available metadata is not described here. See :pep:`566`
for additional details.
Extending the search algorithm
==============================
Because package metadata is not available through :data:`sys.path` searches or
package loaders directly, the metadata for a package is found through import
system `finders`_. To find a distribution package's metadata,
``importlib.metadata`` queries the list of :term:`meta path finders <meta path finder>` on
:data:`sys.meta_path`.
By default ``importlib_metadata`` installs a finder for distribution packages
found on the file system. This finder doesn't actually find any *packages*,
but it can find the packages' metadata.
The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the
interface expected of finders by Python's import system.
``importlib_metadata`` extends this protocol by looking for an optional
``find_distributions`` callable on the finders from
:data:`sys.meta_path` and presents this extended interface as the
``DistributionFinder`` abstract base class, which defines this abstract
method::
@abc.abstractmethod
def find_distributions(context=DistributionFinder.Context()):
"""Return an iterable of all Distribution instances capable of
loading the metadata for packages for the indicated ``context``.
"""
The ``DistributionFinder.Context`` object provides ``.path`` and ``.name``
properties indicating the path to search and names to match and may
supply other relevant context.
What this means in practice is that to support finding distribution package
metadata in locations other than the file system, subclass
``Distribution`` and implement the abstract methods. Then from
a custom finder, return instances of this derived ``Distribution`` in the
``find_distributions()`` method.
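A minimal sketch of such a pair, with invented names and in-memory metadata
for illustration only::

    import sys

    from importlib_metadata import Distribution, DistributionFinder

    class InMemoryDistribution(Distribution):
        """Serve metadata for a single fake package from memory."""

        _metadata = u'Metadata-Version: 2.1\nName: demo\nVersion: 1.0\n'

        def read_text(self, filename):
            # Only METADATA exists; every other file is reported absent.
            return self._metadata if filename == 'METADATA' else None

        def locate_file(self, path):
            raise NotImplementedError('no files on disk')

    class InMemoryFinder(DistributionFinder):
        def find_spec(self, *args, **kwargs):
            return None  # finds no modules, only distributions

        def find_distributions(self, context=DistributionFinder.Context()):
            if context.name in (None, 'demo'):
                yield InMemoryDistribution()

    sys.meta_path.append(InMemoryFinder())
    # importlib_metadata.version('demo') now returns '1.0'.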
.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders
.. rubric:: Footnotes
.. [#f1] Technically, the returned distribution metadata object is an
:class:`email.message.EmailMessage`
instance, but this is an implementation detail, and not part of the
stable API. You should only use dictionary-like methods and syntax
to access the metadata contents.

1
third_party/python/iso8601/iso8601/__init__.py vendored Normal file
@@ -0,0 +1 @@
from .iso8601 import *

214
third_party/python/iso8601/iso8601/iso8601.py vendored Normal file
@@ -0,0 +1,214 @@
"""ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.Utc ...>)
>>>
"""
import datetime
from decimal import Decimal
import sys
import re
__all__ = ["parse_date", "ParseError", "UTC",
"FixedOffset"]
if sys.version_info >= (3, 0, 0):
_basestring = str
else:
_basestring = basestring
# Adapted from http://delete.me.uk/2005/03/iso8601.html
ISO8601_REGEX = re.compile(
r"""
(?P<year>[0-9]{4})
(
(
(-(?P<monthdash>[0-9]{1,2}))
|
(?P<month>[0-9]{2})
(?!$) # Don't allow YYYYMM
)
(
(
(-(?P<daydash>[0-9]{1,2}))
|
(?P<day>[0-9]{2})
)
(
(
(?P<separator>[ T])
(?P<hour>[0-9]{2})
(:{0,1}(?P<minute>[0-9]{2})){0,1}
(
:{0,1}(?P<second>[0-9]{1,2})
([.,](?P<second_fraction>[0-9]+)){0,1}
){0,1}
(?P<timezone>
Z
|
(
(?P<tz_sign>[-+])
(?P<tz_hour>[0-9]{2})
:{0,1}
(?P<tz_minute>[0-9]{2}){0,1}
)
){0,1}
){0,1}
)
){0,1} # YYYY-MM
){0,1} # YYYY only
$
""",
re.VERBOSE
)
class ParseError(Exception):
    """Raised when there is a problem parsing a date string"""


if sys.version_info >= (3, 2, 0):
    UTC = datetime.timezone.utc

    def FixedOffset(offset_hours, offset_minutes, name):
        return datetime.timezone(
            datetime.timedelta(
                hours=offset_hours, minutes=offset_minutes),
            name)
else:
    # Yoinked from python docs
    ZERO = datetime.timedelta(0)

    class Utc(datetime.tzinfo):
        """UTC Timezone
        """
        def utcoffset(self, dt):
            return ZERO

        def tzname(self, dt):
            return "UTC"

        def dst(self, dt):
            return ZERO

        def __repr__(self):
            return "<iso8601.Utc>"

    UTC = Utc()

    class FixedOffset(datetime.tzinfo):
        """Fixed offset in hours and minutes from UTC
        """
        def __init__(self, offset_hours, offset_minutes, name):
            self.__offset_hours = offset_hours  # Keep for later __getinitargs__
            self.__offset_minutes = offset_minutes  # Keep for later __getinitargs__
            self.__offset = datetime.timedelta(
                hours=offset_hours, minutes=offset_minutes)
            self.__name = name

        def __eq__(self, other):
            if isinstance(other, FixedOffset):
                return (
                    (other.__offset == self.__offset)
                    and
                    (other.__name == self.__name)
                )
            return NotImplemented

        def __getinitargs__(self):
            return (self.__offset_hours, self.__offset_minutes, self.__name)

        def utcoffset(self, dt):
            return self.__offset

        def tzname(self, dt):
            return self.__name

        def dst(self, dt):
            return ZERO

        def __repr__(self):
            return "<FixedOffset %r %r>" % (self.__name, self.__offset)
def to_int(d, key, default_to_zero=False, default=None, required=True):
    """Pull a value from the dict and convert to int

    :param default_to_zero: If the value is None or empty, treat it as zero
    :param default: If the value is missing in the dict use this default
    """
    value = d.get(key) or default
    if (value in ["", None]) and default_to_zero:
        return 0
    if value is None:
        if required:
            raise ParseError("Unable to read %s from %s" % (key, d))
    else:
        return int(value)


def parse_timezone(matches, default_timezone=UTC):
    """Parses ISO 8601 time zone specs into tzinfo offsets
    """
    if matches["timezone"] == "Z":
        return UTC
    # This isn't strictly correct, but it's common to encounter dates without
    # timezones so I'll assume the default (which defaults to UTC).
    # Addresses issue 4.
    if matches["timezone"] is None:
        return default_timezone
    sign = matches["tz_sign"]
    hours = to_int(matches, "tz_hour")
    minutes = to_int(matches, "tz_minute", default_to_zero=True)
    description = "%s%02d:%02d" % (sign, hours, minutes)
    if sign == "-":
        hours = -hours
        minutes = -minutes
    return FixedOffset(hours, minutes, description)
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects

    The timezone is parsed from the date string. However it is quite common to
    have dates without a timezone (not strictly correct). In this case the
    default timezone specified in default_timezone is used. This is UTC by
    default.

    :param datestring: The date to parse as a string
    :param default_timezone: A datetime tzinfo instance to use when no timezone
                             is specified in the datestring. If this is set to
                             None then a naive datetime object is returned.
    :returns: A datetime.datetime instance
    :raises: ParseError when there is a problem parsing the date or
             constructing the datetime instance.
    """
    if not isinstance(datestring, _basestring):
        raise ParseError("Expecting a string %r" % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError("Unable to parse date string %r" % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups, default_timezone=default_timezone)
    groups["second_fraction"] = int(
        Decimal("0.%s" % (groups["second_fraction"] or 0)) *
        Decimal("1000000.0"))
    try:
        return datetime.datetime(
            year=to_int(groups, "year"),
            month=to_int(groups, "month", default=to_int(
                groups, "monthdash", required=False, default=1)),
            day=to_int(groups, "day", default=to_int(
                groups, "daydash", required=False, default=1)),
            hour=to_int(groups, "hour", default_to_zero=True),
            minute=to_int(groups, "minute", default_to_zero=True),
            second=to_int(groups, "second", default_to_zero=True),
            microsecond=groups["second_fraction"],
            tzinfo=tz,
        )
    except Exception as e:
        raise ParseError(e)
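# A brief usage sketch of parse_date above (values chosen for illustration):
#
#     import datetime
#     import iso8601
#
#     dt = iso8601.parse_date('2007-01-25T12:00:00Z')
#     assert dt.tzinfo is iso8601.UTC
#
#     # Naive strings pick up default_timezone, which defaults to UTC.
#     plus_two = iso8601.FixedOffset(2, 0, '+02:00')
#     dt2 = iso8601.parse_date('2007-01-25T12:00:00', default_timezone=plus_two)
#     assert dt2.utcoffset() == datetime.timedelta(hours=2)
#
#     # Passing default_timezone=None yields a naive datetime instead.
#     assert iso8601.parse_date('2014', default_timezone=None).tzinfo is None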

102
third_party/python/iso8601/iso8601/test_iso8601.py vendored Normal file
@@ -0,0 +1,102 @@
# coding=UTF-8
from __future__ import absolute_import
import copy
import datetime
import pickle
import pytest
from iso8601 import iso8601
def test_iso8601_regex():
assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
def test_fixedoffset_eq():
    # See https://bitbucket.org/micktwomey/pyiso8601/issues/19
    # No assertion needed: this only checks that comparing a plain tzinfo
    # against a FixedOffset does not raise.
    datetime.tzinfo() == iso8601.FixedOffset(2, 0, '+2:00')
def test_parse_no_timezone_different_default():
tz = iso8601.FixedOffset(2, 0, "test offset")
d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
assert d == datetime.datetime(2007, 1, 1, 8, 0, 0, 0, tz)
assert d.tzinfo == tz
def test_parse_utc_different_default():
"""Z should mean 'UTC', not 'default'.
"""
tz = iso8601.FixedOffset(2, 0, "test offset")
d = iso8601.parse_date("2007-01-01T08:00:00Z", default_timezone=tz)
assert d == datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC)
@pytest.mark.parametrize("invalid_date, error_string", [
("2013-10-", "Unable to parse date string"),
("2013-", "Unable to parse date string"),
("", "Unable to parse date string"),
(None, "Expecting a string"),
("wibble", "Unable to parse date string"),
("23", "Unable to parse date string"),
("131015T142533Z", "Unable to parse date string"),
("131015", "Unable to parse date string"),
("20141", "Unable to parse date string"),
("201402", "Unable to parse date string"),
("2007-06-23X06:40:34.00Z", "Unable to parse date string"), # https://code.google.com/p/pyiso8601/issues/detail?id=14
("2007-06-23 06:40:34.00Zrubbish", "Unable to parse date string"), # https://code.google.com/p/pyiso8601/issues/detail?id=14
("20114-01-03T01:45:49", "Unable to parse date string"),
])
def test_parse_invalid_date(invalid_date, error_string):
assert isinstance(invalid_date, str) or invalid_date is None # Why? 'cos I've screwed up the parametrize before :)
with pytest.raises(iso8601.ParseError) as exc:
iso8601.parse_date(invalid_date)
assert exc.errisinstance(iso8601.ParseError)
assert str(exc.value).startswith(error_string)
@pytest.mark.parametrize("valid_date,expected_datetime,isoformat", [
("2007-06-23 06:40:34.00Z", datetime.datetime(2007, 6, 23, 6, 40, 34, 0, iso8601.UTC), "2007-06-23T06:40:34+00:00"), # Handle a separator other than T
("1997-07-16T19:20+01:00", datetime.datetime(1997, 7, 16, 19, 20, 0, 0, iso8601.FixedOffset(1, 0, "+01:00")), "1997-07-16T19:20:00+01:00"), # Parse with no seconds
("2007-01-01T08:00:00", datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC), "2007-01-01T08:00:00+00:00"), # Handle timezone-less dates. Assumes UTC. http://code.google.com/p/pyiso8601/issues/detail?id=4
("2006-10-20T15:34:56.123+02:30", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.FixedOffset(2, 30, "+02:30")), None),
("2006-10-20T15:34:56Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 0, iso8601.UTC), "2006-10-20T15:34:56+00:00"),
("2007-5-7T11:43:55.328Z", datetime.datetime(2007, 5, 7, 11, 43, 55, 328000, iso8601.UTC), "2007-05-07T11:43:55.328000+00:00"), # http://code.google.com/p/pyiso8601/issues/detail?id=6
("2006-10-20T15:34:56.123Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.UTC), "2006-10-20T15:34:56.123000+00:00"),
("2013-10-15T18:30Z", datetime.datetime(2013, 10, 15, 18, 30, 0, 0, iso8601.UTC), "2013-10-15T18:30:00+00:00"),
("2013-10-15T22:30+04", datetime.datetime(2013, 10, 15, 22, 30, 0, 0, iso8601.FixedOffset(4, 0, "+04:00")), "2013-10-15T22:30:00+04:00"), # <time>±hh:mm
("2013-10-15T1130-0700", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(-7, 0, "-07:00")), "2013-10-15T11:30:00-07:00"), # <time>±hhmm
("2013-10-15T1130+0700", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(+7, 0, "+07:00")), "2013-10-15T11:30:00+07:00"), # <time>±hhmm
("2013-10-15T1130+07", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(+7, 0, "+07:00")), "2013-10-15T11:30:00+07:00"), # <time>±hh
("2013-10-15T1130-07", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(-7, 0, "-07:00")), "2013-10-15T11:30:00-07:00"), # <time>±hh
("2013-10-15T15:00-03:30", datetime.datetime(2013, 10, 15, 15, 0, 0, 0, iso8601.FixedOffset(-3, -30, "-03:30")), "2013-10-15T15:00:00-03:30"),
("2013-10-15T183123Z", datetime.datetime(2013, 10, 15, 18, 31, 23, 0, iso8601.UTC), "2013-10-15T18:31:23+00:00"), # hhmmss
("2013-10-15T1831Z", datetime.datetime(2013, 10, 15, 18, 31, 0, 0, iso8601.UTC), "2013-10-15T18:31:00+00:00"), # hhmm
("2013-10-15T18Z", datetime.datetime(2013, 10, 15, 18, 0, 0, 0, iso8601.UTC), "2013-10-15T18:00:00+00:00"), # hh
("2013-10-15", datetime.datetime(2013, 10, 15, 0, 0, 0, 0, iso8601.UTC), "2013-10-15T00:00:00+00:00"), # YYYY-MM-DD
("20131015T18:30Z", datetime.datetime(2013, 10, 15, 18, 30, 0, 0, iso8601.UTC), "2013-10-15T18:30:00+00:00"), # YYYYMMDD
("2012-12-19T23:21:28.512400+00:00", datetime.datetime(2012, 12, 19, 23, 21, 28, 512400, iso8601.FixedOffset(0, 0, "+00:00")), "2012-12-19T23:21:28.512400+00:00"), # https://code.google.com/p/pyiso8601/issues/detail?id=21
("2006-10-20T15:34:56.123+0230", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.FixedOffset(2, 30, "+02:30")), "2006-10-20T15:34:56.123000+02:30"), # https://code.google.com/p/pyiso8601/issues/detail?id=18
("19950204", datetime.datetime(1995, 2, 4, tzinfo=iso8601.UTC), "1995-02-04T00:00:00+00:00"), # https://code.google.com/p/pyiso8601/issues/detail?id=1
("2010-07-20 15:25:52.520701+00:00", datetime.datetime(2010, 7, 20, 15, 25, 52, 520701, iso8601.FixedOffset(0, 0, "+00:00")), "2010-07-20T15:25:52.520701+00:00"), # https://code.google.com/p/pyiso8601/issues/detail?id=17
("2010-06-12", datetime.datetime(2010, 6, 12, tzinfo=iso8601.UTC), "2010-06-12T00:00:00+00:00"), # https://code.google.com/p/pyiso8601/issues/detail?id=16
("1985-04-12T23:20:50.52-05:30", datetime.datetime(1985, 4, 12, 23, 20, 50, 520000, iso8601.FixedOffset(-5, -30, "-05:30")), "1985-04-12T23:20:50.520000-05:30"), # https://bitbucket.org/micktwomey/pyiso8601/issue/8/015-parses-negative-timezones-incorrectly
("1997-08-29T06:14:00.000123Z", datetime.datetime(1997, 8, 29, 6, 14, 0, 123, iso8601.UTC), "1997-08-29T06:14:00.000123+00:00"), # https://bitbucket.org/micktwomey/pyiso8601/issue/9/regression-parsing-microseconds
("2014-02", datetime.datetime(2014, 2, 1, 0, 0, 0, 0, iso8601.UTC), "2014-02-01T00:00:00+00:00"), # https://bitbucket.org/micktwomey/pyiso8601/issue/14/regression-yyyy-mm-no-longer-parses
("2014", datetime.datetime(2014, 1, 1, 0, 0, 0, 0, iso8601.UTC), "2014-01-01T00:00:00+00:00"), # YYYY
("1997-08-29T06:14:00,000123Z", datetime.datetime(1997, 8, 29, 6, 14, 0, 123, iso8601.UTC), "1997-08-29T06:14:00.000123+00:00"), # Use , as decimal separator
])
def test_parse_valid_date(valid_date, expected_datetime, isoformat):
parsed = iso8601.parse_date(valid_date)
assert parsed.year == expected_datetime.year
assert parsed.month == expected_datetime.month
assert parsed.day == expected_datetime.day
assert parsed.hour == expected_datetime.hour
assert parsed.minute == expected_datetime.minute
assert parsed.second == expected_datetime.second
assert parsed.microsecond == expected_datetime.microsecond
assert parsed.tzinfo == expected_datetime.tzinfo
assert parsed == expected_datetime
assert parsed.isoformat() == expected_datetime.isoformat()
copy.deepcopy(parsed) # ensure it's deep copy-able
pickle.dumps(parsed) # ensure it pickles
if isoformat:
assert parsed.isoformat() == isoformat
assert iso8601.parse_date(parsed.isoformat()) == parsed # Test round trip

@@ -0,0 +1,19 @@
Copyright (c) 2013 Julian Berman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -0,0 +1,224 @@
Metadata-Version: 2.1
Name: jsonschema
Version: 3.2.0
Summary: An implementation of JSON Schema validation for Python
Home-page: https://github.com/Julian/jsonschema
Author: Julian Berman
Author-email: Julian@GrayVines.com
License: UNKNOWN
Project-URL: Docs, https://python-jsonschema.readthedocs.io/en/latest/
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Dist: attrs (>=17.4.0)
Requires-Dist: pyrsistent (>=0.14.0)
Requires-Dist: setuptools
Requires-Dist: six (>=1.11.0)
Requires-Dist: functools32 ; python_version < "3"
Requires-Dist: importlib-metadata ; python_version < "3.8"
Provides-Extra: format
Requires-Dist: idna ; extra == 'format'
Requires-Dist: jsonpointer (>1.13) ; extra == 'format'
Requires-Dist: rfc3987 ; extra == 'format'
Requires-Dist: strict-rfc3339 ; extra == 'format'
Requires-Dist: webcolors ; extra == 'format'
Provides-Extra: format_nongpl
Requires-Dist: idna ; extra == 'format_nongpl'
Requires-Dist: jsonpointer (>1.13) ; extra == 'format_nongpl'
Requires-Dist: webcolors ; extra == 'format_nongpl'
Requires-Dist: rfc3986-validator (>0.1.0) ; extra == 'format_nongpl'
Requires-Dist: rfc3339-validator ; extra == 'format_nongpl'
==========
jsonschema
==========
|PyPI| |Pythons| |Travis| |AppVeyor| |Codecov| |ReadTheDocs|
.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
:alt: PyPI version
:target: https://pypi.org/project/jsonschema/
.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
:alt: Supported Python versions
:target: https://pypi.org/project/jsonschema/
.. |Travis| image:: https://travis-ci.com/Julian/jsonschema.svg?branch=master
:alt: Travis build status
:target: https://travis-ci.com/Julian/jsonschema
.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/adtt0aiaihy6muyn/branch/master?svg=true
:alt: AppVeyor build status
:target: https://ci.appveyor.com/project/Julian/jsonschema
.. |Codecov| image:: https://codecov.io/gh/Julian/jsonschema/branch/master/graph/badge.svg
:alt: Codecov Code coverage
:target: https://codecov.io/gh/Julian/jsonschema
.. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
:alt: ReadTheDocs status
:target: https://python-jsonschema.readthedocs.io/en/stable/
``jsonschema`` is an implementation of `JSON Schema <https://json-schema.org>`_
for Python (supporting 2.7+ including Python 3).
.. code-block:: python
>>> from jsonschema import validate
>>> # A sample schema, like what we'd get from json.load()
>>> schema = {
... "type" : "object",
... "properties" : {
... "price" : {"type" : "number"},
... "name" : {"type" : "string"},
... },
... }
>>> # If no exception is raised by validate(), the instance is valid.
>>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
>>> validate(
... instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'Invalid' is not of type 'number'
It can also be used from the console:
.. code-block:: bash
$ jsonschema -i sample.json sample.schema
Features
--------
* Full support for
`Draft 7 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft7Validator>`_,
`Draft 6 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft6Validator>`_,
`Draft 4 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft4Validator>`_
and
`Draft 3 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft3Validator>`_
* `Lazy validation <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
  that can iteratively report *all* validation errors (see the sketch after this list).
* `Programmatic querying <https://python-jsonschema.readthedocs.io/en/latest/errors/>`_
of which properties or items failed validation.
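As a rough illustration of the lazy-validation feature above (a minimal
sketch; the schema is invented for the example):

.. code-block:: python

    from jsonschema import Draft7Validator

    schema = {"type": "object", "properties": {"price": {"type": "number"}}}
    validator = Draft7Validator(schema)

    # iter_errors() reports every violation rather than raising on the first.
    for error in sorted(validator.iter_errors({"price": "oops"}), key=str):
        print(error.message)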
Installation
------------
``jsonschema`` is available on `PyPI <https://pypi.org/project/jsonschema/>`_. You can install using `pip <https://pip.pypa.io/en/stable/>`_:
.. code-block:: bash
$ pip install jsonschema
Demo
----
Try ``jsonschema`` interactively in this online demo:
.. image:: https://user-images.githubusercontent.com/1155573/56745335-8b158a00-6750-11e9-8776-83fa675939c4.png
:target: https://notebooks.ai/demo/gh/Julian/jsonschema
:alt: Open Live Demo
The online demo notebook will look similar to this:
.. image:: https://user-images.githubusercontent.com/1155573/56820861-5c1c1880-6823-11e9-802a-ce01c5ec574f.gif
:alt: Open Live Demo
:width: 480 px
Release Notes
-------------
v3.1 brings support for ECMA 262 dialect regular expressions
throughout schemas, as recommended by the specification. Big
thanks to @Zac-HD for authoring support in a new `js-regex
<https://pypi.org/project/js-regex/>`_ library.
Running the Test Suite
----------------------
If you have ``tox`` installed (perhaps via ``pip install tox`` or your
package manager), running ``tox`` in the directory of your source
checkout will run ``jsonschema``'s test suite on all of the versions
of Python ``jsonschema`` supports. If you don't have all of the
versions that ``jsonschema`` is tested under, you'll likely want to run
using ``tox``'s ``--skip-missing-interpreters`` option.
Of course you're also free to just run the tests on a single version with your
favorite test runner. The tests live in the ``jsonschema.tests`` package.
Benchmarks
----------
``jsonschema``'s benchmarks make use of `pyperf
<https://pyperf.readthedocs.io>`_.
Running them can be done via ``tox -e perf``, or by invoking the ``pyperf``
commands externally (after ensuring that both it and ``jsonschema`` itself are
installed)::
$ python -m pyperf jsonschema/benchmarks/test_suite.py --hist --output results.json
To compare to a previous run, use::
$ python -m pyperf compare_to --table reference.json results.json
See the ``pyperf`` documentation for more details.
Community
---------
There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
for this implementation on Google Groups.
Please join, and feel free to send questions there.
Contributing
------------
I'm Julian Berman.
``jsonschema`` is on `GitHub <https://github.com/Julian/jsonschema>`_.
Get in touch, via GitHub or otherwise, if you've got something to contribute,
it'd be most welcome!
You can also generally find me on Freenode (nick: ``tos9``) in various
channels, including ``#python``.
If you feel overwhelmingly grateful, you can also woo me with beer money
via Google Pay with the email in my GitHub profile.
And for companies who appreciate ``jsonschema`` and its continued support
and growth, ``jsonschema`` is also now supportable via `TideLift
<https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=readme>`_.

34
third_party/python/jsonschema/jsonschema-3.2.0.dist-info/RECORD vendored Normal file
@@ -0,0 +1,34 @@
jsonschema/__init__.py,sha256=dHAr_pQLbbDFoRnbVMrQVztVUvnBFgFlm7bU82pMvOk,934
jsonschema/__main__.py,sha256=in4bbzfixCAyGe3RhBwhQVZnGkruszNedcbmwxGyJgc,39
jsonschema/_format.py,sha256=vwD1v7S8BmJvSF5y0o6dbPgjAyzt07PZpyO3pvNVVgQ,11691
jsonschema/_legacy_validators.py,sha256=kYcYiHfRV-aQtIQv2qe_71L3QFs3LiJ3v69ifteAN4E,4584
jsonschema/_reflect.py,sha256=gggQrcrf5FRoyhgdE6ggJ4n2FQHEzWS4CS-cm9bYcqI,5023
jsonschema/_types.py,sha256=t2naRRhuTKIUIB0GMR9kOp2la2aVqeT2tFlVAobndmg,4490
jsonschema/_utils.py,sha256=ezZJMQ0eU4oPvkTmZi6g5vsCtIFRhb8vN4Y9S4uQwW8,5168
jsonschema/_validators.py,sha256=UDYawpxK8f_rIeEBXZtwr0tlxi3OH1Zt2ca0zAxjNdk,11703
jsonschema/cli.py,sha256=3Vc8ptc2GD7zDxK2F-kamqmrE9f35a2KVDGR1p1acUU,2310
jsonschema/compat.py,sha256=37gSA8MmAR65zlqzsSEB-0ObZk_I2TF7z1kp9zmkskg,1353
jsonschema/exceptions.py,sha256=ukWIE7aEES8Kh0UaUP9turpUkV2ZzXEN8CwfRObzlMA,10450
jsonschema/validators.py,sha256=RIZTQyZxhWwsyIIRFQGEjLzq38LlyzzzdYUl9jxzV0M,29400
jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70
jsonschema/benchmarks/issue232.py,sha256=-azAUmrP75f0uj0x2zEdBc3-DhQw3XX9UQVDCyhBKRk,541
jsonschema/benchmarks/json_schema_test_suite.py,sha256=okRE6ACue2C0Hd1dMhnpZ0bc3AoZdDd8cw2lwTnbzwU,343
jsonschema/schemas/draft3.json,sha256=PdtCu2s06Va3hV9cX5A5-rvye50SVF__NrvxG0vuzz0,4564
jsonschema/schemas/draft4.json,sha256=ODL-0W3kv7MmqL3tm3GJguuVxN1QNO1GtBcpWE3ok98,5399
jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
jsonschema/tests/_helpers.py,sha256=3c-b9CK0cdGfhtuUhzM1AjtqPtR2VFvfcKC6G2g0a-0,157
jsonschema/tests/_suite.py,sha256=6lxDHOyjJfCjdn9vfOLcUpXtNl0vLIljrinSFi1tRhc,6728
jsonschema/tests/test_cli.py,sha256=djw7ZD6zm5_8FgsAr9XyYk4zErIEoPRs8SzBe5nYcWY,4727
jsonschema/tests/test_exceptions.py,sha256=zw9bd_al5zOzAm8nJ0IqeymiweH6i8k1AN3CB7t618A,15348
jsonschema/tests/test_format.py,sha256=ob0QDop_nwRwiLs1P6sGsf6ZITik00CWhe1pL8JRiA0,2982
jsonschema/tests/test_jsonschema_test_suite.py,sha256=8uiplgvQq5yFvtvWxbyqyr7HMYRCx6jNE3OiU-u8AEk,8464
jsonschema/tests/test_types.py,sha256=lntWPZ86fwo_aNKbfCueX5R2xdwrYYN7Zo5C0-ppk-0,5902
jsonschema/tests/test_validators.py,sha256=R_zhsDKG5r66LE1OVlzdcPyKRWKgc07e6NVWxQkrRiQ,60394
jsonschema-3.2.0.dist-info/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
jsonschema-3.2.0.dist-info/METADATA,sha256=os_TL7tiSfPYDMKYoAqoNsw_yMkDJmCL2bqhp-csNR0,7760
jsonschema-3.2.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
jsonschema-3.2.0.dist-info/entry_points.txt,sha256=KaVUBBSLyzi5naUkVg-r3q6T_igdLgaHY6Mm3oLX73s,52
jsonschema-3.2.0.dist-info/top_level.txt,sha256=jGoNS61vDONU8U7p0Taf-y_8JVG1Z2CJ5Eif6zMN_cw,11
jsonschema-3.2.0.dist-info/RECORD,,

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.33.6)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@@ -0,0 +1,3 @@
[console_scripts]
jsonschema = jsonschema.cli:main

@@ -0,0 +1 @@
jsonschema

34
third_party/python/jsonschema/jsonschema/__init__.py vendored Normal file
@@ -0,0 +1,34 @@
"""
An implementation of JSON Schema for Python
The main functionality is provided by the validator classes for each of the
supported JSON Schema versions.
Most commonly, `validate` is the quickest way to simply validate a given
instance under a schema, and will create a validator for you.
"""
from jsonschema.exceptions import (
ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
)
from jsonschema._format import (
FormatChecker,
draft3_format_checker,
draft4_format_checker,
draft6_format_checker,
draft7_format_checker,
)
from jsonschema._types import TypeChecker
from jsonschema.validators import (
Draft3Validator,
Draft4Validator,
Draft6Validator,
Draft7Validator,
RefResolver,
validate,
)
try:
from importlib import metadata
except ImportError: # for Python<3.8
import importlib_metadata as metadata
__version__ = metadata.version("jsonschema")

2
third_party/python/jsonschema/jsonschema/__main__.py vendored Normal file
@@ -0,0 +1,2 @@
from jsonschema.cli import main
main()
