Backed out 2 changesets (bug 1473648, bug 1476661) for mochitest failure on testing/mochitest/tests/python/test_basic_mochitest_plain.py

Backed out changeset dbb14978acf8 (bug 1473648)
Backed out changeset de933cb380f9 (bug 1476661)
This commit is contained in:
Dorel Luca 2018-08-01 16:01:05 +03:00
Родитель c0f96019e1
Коммит 57113ba2b4
189 изменённых файлов: 61 добавлений и 45350 удалений

Просмотреть файл

@ -60,12 +60,8 @@ mozilla.pth:tools
mozilla.pth:testing/web-platform
mozilla.pth:testing/web-platform/tests/tools/third_party/html5lib
mozilla.pth:testing/web-platform/tests/tools/third_party/webencodings
mozilla.pth:testing/web-platform/tests/tools/third_party/h2
mozilla.pth:testing/web-platform/tests/tools/third_party/hpack
mozilla.pth:testing/web-platform/tests/tools/third_party/hyperframe
mozilla.pth:testing/web-platform/tests/tools/third_party/certifi
mozilla.pth:testing/web-platform/tests/tools/wptserve
mozilla.pth:testing/web-platform/tests/tools/wptrunner
mozilla.pth:testing/web-platform/tests/tools/wptserve
mozilla.pth:testing/web-platform/tests/tools/six
mozilla.pth:testing/xpcshell
mozilla.pth:third_party/python/mock-1.0.0

Просмотреть файл

@ -220,12 +220,6 @@ ARCHIVE_FILES = {
'pattern': '**',
'dest': 'tools/wptserve',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing/web-platform/tests/tools/third_party',
'pattern': '**',
'dest': 'tools/wpt_third_party',
},
{
'source': buildconfig.topsrcdir,
'base': 'python/mozterm',

Просмотреть файл

@ -1,11 +1,6 @@
-r mozbase_requirements.txt
../tools/wptserve
../tools/wpt_third_party/certifi
../tools/wpt_third_party/enum
../tools/wpt_third_party/h2
../tools/wpt_third_party/hyperframe
../tools/wpt_third_party/hpack
../marionette/client
../marionette/harness/marionette_harness/runner/mixins/browsermob-proxy-py
../marionette/harness

Просмотреть файл

@ -152,7 +152,6 @@ class FirefoxUITests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
'mozbase/*',
'tools/mozterm/*',
'tools/wptserve/*',
'tools/wpt_third_party/*',
'mozpack/*',
'mozbuild/*',
]

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -17,12 +17,5 @@ sys.path.insert(0, os.path.join(here, "third_party", "py"))
sys.path.insert(0, os.path.join(here, "third_party", "pytest", "src"))
sys.path.insert(0, os.path.join(here, "third_party", "six"))
sys.path.insert(0, os.path.join(here, "third_party", "webencodings"))
sys.path.insert(0, os.path.join(here, "third_party", "h2"))
sys.path.insert(0, os.path.join(here, "third_party", "hpack"))
sys.path.insert(0, os.path.join(here, "third_party", "hyperframe"))
sys.path.insert(0, os.path.join(here, "third_party", "certifi"))
sys.path.insert(0, os.path.join(here, "webdriver"))
sys.path.insert(0, os.path.join(here, "wptrunner"))
if sys.version_info[0] == 2:
sys.path.insert(0, os.path.join(here, "third_party", "enum"))

Просмотреть файл

@ -27,7 +27,7 @@ from wptserve import stash
from wptserve import config
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from wptserve.utils import get_port, HTTPException
from mod_pywebsocket import standalone as pywebsocket
@ -491,17 +491,11 @@ def start_servers(host, ports, paths, routes, bind_address, config, **kwargs):
for scheme, ports in ports.items():
assert len(ports) == {"http":2}.get(scheme, 1)
# TODO Not very ideal, look into removing it in the future
# Check that python 2.7.15 is being used for HTTP/2.0
if scheme == 'http2' and not http2_compatible():
continue
for port in ports:
if port is None:
continue
init_func = {"http":start_http_server,
"https":start_https_server,
"http2":start_http2_server,
"ws":start_ws_server,
"wss":start_wss_server}[scheme]
@ -672,9 +666,6 @@ def iter_procs(servers):
def build_config(override_path=None, **kwargs):
rv = ConfigBuilder()
if kwargs.get("h2"):
rv._default["ports"]["http2"] = [9000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
@ -728,7 +719,7 @@ class ConfigBuilder(config.ConfigBuilder):
"http": [8000, "auto"],
"https": [8443],
"ws": ["auto"],
"wss": ["auto"],
"wss": ["auto"]
},
"check_subdomains": True,
"log_level": "debug",

Просмотреть файл

@ -1,21 +0,0 @@
This package contains a modified version of ca-bundle.crt:
ca-bundle.crt -- Bundle of CA Root Certificates
Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.
***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.
***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $

Просмотреть файл

@ -1 +0,0 @@
include MANIFEST.in README.rst LICENSE certifi/cacert.pem

Просмотреть файл

@ -1,69 +0,0 @@
Metadata-Version: 1.1
Name: certifi
Version: 2018.4.16
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: http://certifi.io/
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Description: Certifi: Python SSL Certificates
================================
`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
Enjoy!
1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~
Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.
In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed. To assist in migrating old
code, the function ``certifi.old_where()`` continues to exist as an alias of
``certifi.where()``. Please update your code to use ``certifi.where()``
instead. ``certifi.old_where()`` will be removed in 2018.
.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6

Просмотреть файл

@ -1,46 +0,0 @@
Certifi: Python SSL Certificates
================================
`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
Enjoy!
1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~
Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.
In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed. To assist in migrating old
code, the function ``certifi.old_where()`` continues to exist as an alias of
``certifi.where()``. Please update your code to use ``certifi.where()``
instead. ``certifi.old_where()`` will be removed in 2018.
.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/

Просмотреть файл

@ -1,69 +0,0 @@
Metadata-Version: 1.1
Name: certifi
Version: 2018.4.16
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: http://certifi.io/
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Description: Certifi: Python SSL Certificates
================================
`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
Enjoy!
1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~
Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.
In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed. To assist in migrating old
code, the function ``certifi.old_where()`` continues to exist as an alias of
``certifi.where()``. Please update your code to use ``certifi.where()``
instead. ``certifi.old_where()`` will be removed in 2018.
.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6

Просмотреть файл

@ -1,14 +0,0 @@
LICENSE
MANIFEST.in
README.rst
setup.cfg
setup.py
certifi/__init__.py
certifi/__main__.py
certifi/cacert.pem
certifi/core.py
certifi.egg-info/PKG-INFO
certifi.egg-info/SOURCES.txt
certifi.egg-info/dependency_links.txt
certifi.egg-info/not-zip-safe
certifi.egg-info/top_level.txt

Просмотреть файл

@ -1 +0,0 @@

Просмотреть файл

@ -1 +0,0 @@

Просмотреть файл

@ -1 +0,0 @@
certifi

Просмотреть файл

@ -1,3 +0,0 @@
from .core import where, old_where
__version__ = "2018.04.16"

Просмотреть файл

@ -1,2 +0,0 @@
from certifi import where
print(where())

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,37 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certifi.py
~~~~~~~~~~

Locate the ``cacert.pem`` CA-certificate bundle that ships with this package.
"""
import os
import warnings


class DeprecatedBundleWarning(DeprecationWarning):
    """
    The weak security bundle is being deprecated. Please bother your service
    provider to get them to stop using cross-signed roots.
    """


def where():
    """Return the absolute path of the bundled ``cacert.pem`` file."""
    # The PEM bundle lives in the same directory as this module.
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, 'cacert.pem')


def old_where():
    """Deprecated alias of :func:`where`; warns, then delegates."""
    message = (
        "The weak security bundle has been removed. certifi.old_where() is now an alias "
        "of certifi.where(). Please update your code to use certifi.where() instead. "
        "certifi.old_where() will be removed in 2018."
    )
    warnings.warn(message, DeprecatedBundleWarning)
    return where()


if __name__ == '__main__':
    print(where())

Просмотреть файл

@ -1,11 +0,0 @@
[bdist_wheel]
universal = 1
[metadata]
license_file = LICENSE
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

Просмотреть файл

@ -1,67 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for certifi (vendored copy)."""
from __future__ import with_statement

import re
import os
import sys

# While I generally consider it an antipattern to try and support both
# setuptools and distutils with a single setup.py, in this specific instance
# where certifi is a dependency of setuptools, it can create a circular
# dependency when projects attempt to unbundle stuff from setuptools and pip.
# Though we don't really support that, it makes things easier if we do this and
# should hopefully cause less issues for end users.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Single-source the version: scrape __version__ out of the package rather
# than importing it (importing could fail during an unbundled install).
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('certifi/__init__.py', 'r') as f:
    text = f.read()
    match = re.search(version_regex, text)
    if match:
        VERSION = match.group(1)
    else:
        raise RuntimeError("No version number found!")

# Convenience shortcut: `python setup.py publish` builds and uploads a release.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist bdist_wheel upload')
    sys.exit()

# certifi has no runtime dependencies.
required = []

setup(
    name='certifi',
    version=VERSION,
    description='Python package for providing Mozilla\'s CA Bundle.',
    long_description=open('README.rst').read(),
    author='Kenneth Reitz',
    author_email='me@kennethreitz.com',
    url='http://certifi.io/',
    packages=[
        'certifi',
    ],
    package_dir={'certifi': 'certifi'},
    # The certificate bundle itself must ship inside the wheel/sdist.
    package_data={'certifi': ['*.pem']},
    # data_files=[('certifi', ['certifi/cacert.pem'])],
    include_package_data=True,
    zip_safe=False,
    license='MPL-2.0',
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ),
)

Просмотреть файл

@ -1,9 +0,0 @@
exclude enum/*
include setup.py
include README
include enum/__init__.py
include enum/test.py
include enum/LICENSE
include enum/README
include enum/doc/enum.pdf
include enum/doc/enum.rst

Просмотреть файл

@ -1,62 +0,0 @@
Metadata-Version: 1.1
Name: enum34
Version: 1.1.6
Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
Home-page: https://bitbucket.org/stoneleaf/enum34
Author: Ethan Furman
Author-email: ethan@stoneleaf.us
License: BSD License
Description: enum --- support for enumerations
========================================
An enumeration is a set of symbolic names (members) bound to unique, constant
values. Within an enumeration, the members can be compared by identity, and
the enumeration itself can be iterated over.
from enum import Enum
class Fruit(Enum):
apple = 1
banana = 2
orange = 3
list(Fruit)
# [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
len(Fruit)
# 3
Fruit.banana
# <Fruit.banana: 2>
Fruit['banana']
# <Fruit.banana: 2>
Fruit(2)
# <Fruit.banana: 2>
Fruit.banana is Fruit['banana'] is Fruit(2)
# True
Fruit.banana.name
# 'banana'
Fruit.banana.value
# 2
Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Provides: enum

Просмотреть файл

@ -1,3 +0,0 @@
enum34 is the new Python stdlib enum module available in Python 3.4
backported for previous versions of Python from 2.4 to 3.3.
tested on 2.6, 2.7, and 3.3+

Просмотреть файл

@ -1,32 +0,0 @@
Copyright (c) 2013, Ethan Furman.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name Ethan Furman nor the names of any
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Просмотреть файл

@ -1,3 +0,0 @@
enum34 is the new Python stdlib enum module available in Python 3.4
backported for previous versions of Python from 2.4 to 3.3.
tested on 2.6, 2.7, and 3.3+

Просмотреть файл

@ -1,837 +0,0 @@
"""Python Enumerations"""

import sys as _sys

# Public API of this enum backport.
__all__ = ['Enum', 'IntEnum', 'unique']

version = 1, 1, 6

# Interpreter version as a float (e.g. 2.7, 3.6), used throughout for
# 2.x-vs-3.x branching.
# NOTE(review): '%s.%s' turns 3.10 into 3.1 — harmless for the major-version
# comparisons performed here, but not a general version comparison.
pyver = float('%s.%s' % _sys.version_info[:2])

try:
    any
except NameError:
    # Python < 2.5 has no builtin any(); supply an equivalent fallback.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    from collections import OrderedDict
except ImportError:
    # OrderedDict appeared in 2.7/3.1; on older interpreters callers must
    # check for None and fall back to a plain dict.
    OrderedDict = None

try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str

try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Class-body namespace used by EnumMeta.

    Records the definition order of prospective enum members in
    ``_member_names`` and refuses to let a member name be reused within a
    single enum body.  Dunder names, sunder names, and descriptors are
    deliberately *not* treated as members.
    """

    def __init__(self):
        super(_EnumDict, self).__init__()
        self._member_names = []

    def __setitem__(self, key, value):
        """Store *key*, tracking it as a member name when appropriate.

        On Python 3 the ``_order_``/``__order__`` hints are discarded (the
        class body already preserves order); on Python 2 ``__order__`` is
        normalised to ``_order_``.  Sunder names other than ``_order_`` are
        reserved, and defining the same member twice is an error.
        """
        order_keys = ('_order_', '__order__')
        if pyver >= 3.0 and key in order_keys:
            # 3.x: __prepare__ already preserves order; the hint is noise.
            return
        if key == '__order__':
            key = '_order_'
        if _is_sunder(key):
            if key != '_order_':
                raise ValueError('_names_ are reserved for future Enum use')
        elif not _is_dunder(key):
            if key in self._member_names:
                # descriptor overwriting an enum?
                raise TypeError('Attempted to reuse key: %r' % key)
            if not _is_descriptor(value):
                if key in self:
                    # enum overwriting a descriptor?
                    raise TypeError('Key already defined as: %r' % self[key])
                self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist.  This
# is also why there are checks in EnumMeta like `if Enum is not None`.
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
_order_ = classdict.get('_order_')
if _order_ is None:
if pyver < 3.0:
try:
_order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
except TypeError:
_order_ = [name for name in sorted(members.keys())]
else:
_order_ = classdict._member_names
else:
del classdict['_order_']
if pyver < 3.0:
_order_ = _order_.replace(',', ' ').split()
aliases = [name for name in members if name not in _order_]
_order_ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names), ))
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = set([a for b in bases for a in b.__dict__])
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in _order_:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
# method resolution and int's are not playing nice
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __bool__(cls):
"""
classes/types should always be True.
"""
return True
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __len__(cls):
return len(cls._member_names_)
__nonzero__ = __bool__
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
        """Convenience method to create a new Enum class (functional API).

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered starting from `start`.
        * An iterable of member names.  Values are auto-numbered from `start`.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.

        NOTE: `type` intentionally shadows the builtin; it is part of the
        public signature and supplies an optional mix-in base class.
        """
        if pyver < 3.0:
            # Python 2: class names must be str; if class_name is unicode,
            # attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            # mix-in type must precede the Enum base (see _get_mixins_)
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        _order_ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            # plain name sequence: auto-number the values
            names = [(e, i+start) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        item = None                          # in case names is empty
        for item in names:
            if isinstance(item, basestring):
                # names is a mapping: the item is the key
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            _order_.append(member_name)
        # only set _order_ in classdict if name/value was not from a mapping
        # (mapping iteration order is not meaningful here)
        if not isinstance(item, basestring):
            classdict['_order_'] = ' '.join(_order_)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                # frame 2 is the caller of the functional API
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                # no frame support (e.g. IronPython/Jython) -- see docs
                pass
        if module is None:
            # unknown defining module: pickling this class cannot work
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            # bootstrapping: Enum itself is still being created at this point
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases (deliberate use of the
        # leaked loop variable)
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]     # first data type
            first_enum = bases[-1]     # enum type
        else:
            # walk the MRO of the Enum subclass to find both pieces
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    # The correct __new__ is located differently on Python 2 vs 3: the
    # Python 2 variant must also unwrap unbound methods / staticmethod
    # objects found in a class __dict__.
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__

            Returns a (new, save_new, use_args) triple.
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                # bootstrapping: Enum itself is being created right now
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            # unwrap so it can be called directly
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                # for/else: nothing custom found anywhere
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__

            Returns a (new, save_new, use_args) triple.
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    # for/else: nothing custom found anywhere
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################

# temp_enum_dict accumulates the would-be class body of Enum; it is fed
# to EnumMeta at the bottom of this section and then deleted.
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # idempotent lookups such as Color(Color.red): unwrap to the
        # raw value first
        value = value.value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # unhashable value: fall back to an O(n) scan of all members
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    """Member repr shows class, name, and value: <Color.red: 1>."""
    cls_name = self.__class__.__name__
    return "<%s.%s: %r>" % (cls_name, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
    """Member str shows just Class.member_name."""
    parts = (self.__class__.__name__, self._name_)
    return "%s.%s" % parts
temp_enum_dict['__str__'] = __str__
del __str__
# Python 3 only: a custom member __dir__ breaks some stdlib decorators
# on the Python 2 line (see the accompanying docs).
if pyver >= 3.0:
    def __dir__(self):
        """dir() of a member: core dunders plus any added behavior."""
        added_behavior = []
        for klass in self.__class__.mro():
            for attr in klass.__dict__:
                if attr[0] != '_' and attr not in self._member_map_:
                    added_behavior.append(attr)
        return ['__class__', '__doc__', '__module__', ] + added_behavior
    temp_enum_dict['__dir__'] = __dir__
    del __dir__
def __format__(self, format_spec):
    """Delegate formatting to the mixed-in type when there is one.

    A pure Enum formats its str() form (Class.member); a mixed-in Enum
    formats its underlying value with that type's own rules, so format
    codes behave as the mixed-in type's users expect.
    """
    if self._member_type_ is object:
        # pure Enum branch
        target_type, target_value = str, str(self)
    else:
        # mix-in branch
        target_type, target_value = self._member_type_, self.value
    return target_type.__format__(target_value, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
    def __cmp__(self, other):
        # Members of the same class compare equal only to themselves;
        # any other member sorts before; anything else is unordered.
        # (An unreachable `raise TypeError` that followed the final
        # return has been removed -- it was dead code.)
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    # Python 2.6+: explicitly forbid ordered comparisons between members;
    # Enum members are unordered by design (see OrderedEnum in the docs
    # for an ordered variant).
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    """Members are equal only by identity; mixed comparisons defer."""
    if type(other) is not self.__class__:
        return NotImplemented
    return self is other
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
    """Inverse of __eq__ (must be written out explicitly on Python 2)."""
    if type(other) is not self.__class__:
        return NotImplemented
    return self is not other
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
    """Hash by canonical member name so members work as dict/set keys."""
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
    """Pickle by value: unpickling re-runs the by-value lookup Class(value)."""
    member_class = self.__class__
    return member_class, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.

@_RouteClassAttributeToGetattr
def name(self):
    """The member's canonical name (read-only)."""
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    """The member's underlying value (read-only)."""
    return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
    """
    Create a new Enum subclass that replaces a collection of global constants

    name: name of the new Enum class
    module: name of the module whose constants are being converted
    filter: callable selecting which names become members
    source: optional namespace to pull the constants from instead of
            the module itself

    NOTE: `filter` intentionally shadows the builtin of the same name;
    it is part of the public signature and must not be renamed.
    """
    # convert all constants from source (or module) that pass filter() to
    # a new Enum called name, and export the enum and its members back to
    # module;
    # also, replace the __reduce_ex__ method so unpickling works in
    # previous Python versions
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    # the generator expression has its own scope, so its `name` does not
    # clobber the `name` parameter used below
    members = dict((name, value) for name, value in source.items() if filter(name))
    # rebind cls: from here on it is the freshly created Enum subclass
    cls = cls(name, members, module=module)
    # pickle members by name (see _reduce_ex_by_name below)
    cls.__reduce_ex__ = _reduce_ex_by_name
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls
temp_enum_dict['_convert'] = _convert
del _convert
# Assemble the Enum base class from the accumulated method dictionary.
# Calling the metaclass directly sidesteps the py2/py3 metaclass-syntax
# difference (`__metaclass__` attribute vs `metaclass=` keyword).
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
    # The int mix-in means members compare equal to their integer values
    # and can be used wherever plain ints are (see the package docs).
def _reduce_ex_by_name(self, proto):
    """Pickle replacement that serializes a member simply by its name."""
    member_name = self.name
    return member_name
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is a __members__ entry whose key differs from the member's
    # canonical name.
    aliases = [
        (alias, member.name)
        for alias, member in enumeration.__members__.items()
        if alias != member.name
    ]
    if aliases:
        duplicate_names = ', '.join(
            ["%s -> %s" % (alias, name) for (alias, name) in aliases]
        )
        raise ValueError('duplicate names found in %r: %s' %
                (enumeration, duplicate_names)
                )
    return enumeration

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,735 +0,0 @@
``enum`` --- support for enumerations
========================================
.. :synopsis: enumerations are sets of symbolic names bound to unique, constant
values.
.. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
.. :sectionauthor:: Barry Warsaw <barry@python.org>,
.. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
.. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
----------------
An enumeration is a set of symbolic names (members) bound to unique, constant
values. Within an enumeration, the members can be compared by identity, and
the enumeration itself can be iterated over.
Module Contents
---------------
This module defines two enumeration classes that can be used to define unique
sets of names and values: ``Enum`` and ``IntEnum``. It also defines
one decorator, ``unique``.
``Enum``
Base class for creating enumerated constants. See section `Functional API`_
for an alternate construction syntax.
``IntEnum``
Base class for creating enumerated constants that are also subclasses of ``int``.
``unique``
Enum class decorator that ensures only one name is bound to any one value.
Creating an Enum
----------------
Enumerations are created using the ``class`` syntax, which makes them
easy to read and write. An alternative creation method is described in
`Functional API`_. To define an enumeration, subclass ``Enum`` as
follows::
>>> from enum import Enum
>>> class Color(Enum):
... red = 1
... green = 2
... blue = 3
Note: Nomenclature
- The class ``Color`` is an *enumeration* (or *enum*)
- The attributes ``Color.red``, ``Color.green``, etc., are
*enumeration members* (or *enum members*).
- The enum members have *names* and *values* (the name of
``Color.red`` is ``red``, the value of ``Color.blue`` is
``3``, etc.)
Note:
Even though we use the ``class`` syntax to create Enums, Enums
are not normal Python classes. See `How are Enums different?`_ for
more details.
Enumeration members have human readable string representations::
>>> print(Color.red)
Color.red
...while their ``repr`` has more information::
>>> print(repr(Color.red))
<Color.red: 1>
The *type* of an enumeration member is the enumeration it belongs to::
>>> type(Color.red)
<enum 'Color'>
>>> isinstance(Color.green, Color)
True
>>>
Enum members also have a property that contains just their item name::
>>> print(Color.red.name)
red
Enumerations support iteration. In Python 3.x definition order is used; in
Python 2.x the definition order is not available, but class attribute
``__order__`` is supported; otherwise, value order is used::
>>> class Shake(Enum):
... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
... vanilla = 7
... chocolate = 4
... cookies = 9
... mint = 3
...
>>> for shake in Shake:
... print(shake)
...
Shake.vanilla
Shake.chocolate
Shake.cookies
Shake.mint
The ``__order__`` attribute is always removed, and in 3.x it is also ignored
(order is definition order); however, in the stdlib version it will be ignored
but not removed.
Enumeration members are hashable, so they can be used in dictionaries and sets::
>>> apples = {}
>>> apples[Color.red] = 'red delicious'
>>> apples[Color.green] = 'granny smith'
>>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
True
Programmatic access to enumeration members and their attributes
---------------------------------------------------------------
Sometimes it's useful to access members in enumerations programmatically (i.e.
situations where ``Color.red`` won't do because the exact color is not known
at program-writing time). ``Enum`` allows such access::
>>> Color(1)
<Color.red: 1>
>>> Color(3)
<Color.blue: 3>
If you want to access enum members by *name*, use item access::
>>> Color['red']
<Color.red: 1>
>>> Color['green']
<Color.green: 2>
If you have an enum member and need its ``name`` or ``value``::
>>> member = Color.red
>>> member.name
'red'
>>> member.value
1
Duplicating enum members and values
-----------------------------------
Having two enum members (or any other attribute) with the same name is invalid;
in Python 3.x this would raise an error, but in Python 2.x the second member
simply overwrites the first::
>>> # python 2.x
>>> class Shape(Enum):
... square = 2
... square = 3
...
>>> Shape.square
<Shape.square: 3>
>>> # python 3.x
>>> class Shape(Enum):
... square = 2
... square = 3
Traceback (most recent call last):
...
TypeError: Attempted to reuse key: 'square'
However, two enum members are allowed to have the same value. Given two members
A and B with the same value (and A defined first), B is an alias to A. By-value
lookup of the value of A and B will return A. By-name lookup of B will also
return A::
>>> class Shape(Enum):
... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
... square = 2
... diamond = 1
... circle = 3
... alias_for_square = 2
...
>>> Shape.square
<Shape.square: 2>
>>> Shape.alias_for_square
<Shape.square: 2>
>>> Shape(2)
<Shape.square: 2>
Allowing aliases is not always desirable. ``unique`` can be used to ensure
that none exist in a particular enumeration::
>>> from enum import unique
>>> @unique
... class Mistake(Enum):
... __order__ = 'one two three four' # only needed in 2.x
... one = 1
... two = 2
... three = 3
... four = 3
Traceback (most recent call last):
...
ValueError: duplicate names found in <enum 'Mistake'>: four -> three
Iterating over the members of an enum does not provide the aliases::
>>> list(Shape)
[<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
The special attribute ``__members__`` is a dictionary mapping names to members.
It includes all names defined in the enumeration, including the aliases::
>>> for name, member in sorted(Shape.__members__.items()):
... name, member
...
('alias_for_square', <Shape.square: 2>)
('circle', <Shape.circle: 3>)
('diamond', <Shape.diamond: 1>)
('square', <Shape.square: 2>)
The ``__members__`` attribute can be used for detailed programmatic access to
the enumeration members. For example, finding all the aliases::
>>> [name for name, member in Shape.__members__.items() if member.name != name]
['alias_for_square']
Comparisons
-----------
Enumeration members are compared by identity::
>>> Color.red is Color.red
True
>>> Color.red is Color.blue
False
>>> Color.red is not Color.blue
True
Ordered comparisons between enumeration values are *not* supported. Enum
members are not integers (but see `IntEnum`_ below)::
>>> Color.red < Color.blue
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unorderable types: Color() < Color()
.. warning::
In Python 2 *everything* is ordered, even though the ordering may not
make sense. If you want your enumerations to have a sensible ordering
check out the `OrderedEnum`_ recipe below.
Equality comparisons are defined though::
>>> Color.blue == Color.red
False
>>> Color.blue != Color.red
True
>>> Color.blue == Color.blue
True
Comparisons against non-enumeration values will always compare not equal
(again, ``IntEnum`` was explicitly designed to behave differently, see
below)::
>>> Color.blue == 2
False
Allowed members and attributes of enumerations
----------------------------------------------
The examples above use integers for enumeration values. Using integers is
short and handy (and provided by default by the `Functional API`_), but not
strictly enforced. In the vast majority of use-cases, one doesn't care what
the actual value of an enumeration is. But if the value *is* important,
enumerations can have arbitrary values.
Enumerations are Python classes, and can have methods and special methods as
usual. If we have this enumeration::
>>> class Mood(Enum):
... funky = 1
... happy = 3
...
... def describe(self):
... # self is the member here
... return self.name, self.value
...
... def __str__(self):
... return 'my custom str! {0}'.format(self.value)
...
... @classmethod
... def favorite_mood(cls):
... # cls here is the enumeration
... return cls.happy
Then::
>>> Mood.favorite_mood()
<Mood.happy: 3>
>>> Mood.happy.describe()
('happy', 3)
>>> str(Mood.funky)
'my custom str! 1'
The rules for what is allowed are as follows: _sunder_ names (starting and
ending with a single underscore) are reserved by enum and cannot be used;
all other attributes defined within an enumeration will become members of this
enumeration, with the exception of *__dunder__* names and descriptors (methods
are also descriptors).
Note:
If your enumeration defines ``__new__`` and/or ``__init__`` then
whatever value(s) were given to the enum member will be passed into
those methods. See `Planet`_ for an example.
Restricted subclassing of enumerations
--------------------------------------
Subclassing an enumeration is allowed only if the enumeration does not define
any members. So this is forbidden::
>>> class MoreColor(Color):
... pink = 17
Traceback (most recent call last):
...
TypeError: Cannot extend enumerations
But this is allowed::
>>> class Foo(Enum):
... def some_behavior(self):
... pass
...
>>> class Bar(Foo):
... happy = 1
... sad = 2
...
Allowing subclassing of enums that define members would lead to a violation of
some important invariants of types and instances. On the other hand, it makes
sense to allow sharing some common behavior between a group of enumerations.
(See `OrderedEnum`_ for an example.)
Pickling
--------
Enumerations can be pickled and unpickled::
>>> from enum.test_enum import Fruit
>>> from pickle import dumps, loads
>>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
True
The usual restrictions for pickling apply: picklable enums must be defined in
the top level of a module, since unpickling requires them to be importable
from that module.
Note:
With pickle protocol version 4 (introduced in Python 3.4) it is possible
to easily pickle enums nested in other classes.
Functional API
--------------
The ``Enum`` class is callable, providing the following functional API::
>>> Animal = Enum('Animal', 'ant bee cat dog')
>>> Animal
<enum 'Animal'>
>>> Animal.ant
<Animal.ant: 1>
>>> Animal.ant.value
1
>>> list(Animal)
[<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
The semantics of this API resemble ``namedtuple``. The first argument
of the call to ``Enum`` is the name of the enumeration.
The second argument is the *source* of enumeration member names. It can be a
whitespace-separated string of names, a sequence of names, a sequence of
2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
values. The last two options enable assigning arbitrary values to
enumerations; the others auto-assign increasing integers starting with 1. A
new class derived from ``Enum`` is returned. In other words, the above
assignment to ``Animal`` is equivalent to::
>>> class Animals(Enum):
... ant = 1
... bee = 2
... cat = 3
... dog = 4
Pickling enums created with the functional API can be tricky as frame stack
implementation details are used to try and figure out which module the
enumeration is being created in (e.g. it will fail if you use a utility
function in a separate module, and also may not work on IronPython or Jython).
The solution is to specify the module name explicitly as follows::
>>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
Derived Enumerations
--------------------
IntEnum
^^^^^^^
A variation of ``Enum`` is provided which is also a subclass of
``int``. Members of an ``IntEnum`` can be compared to integers;
by extension, integer enumerations of different types can also be compared
to each other::
>>> from enum import IntEnum
>>> class Shape(IntEnum):
... circle = 1
... square = 2
...
>>> class Request(IntEnum):
... post = 1
... get = 2
...
>>> Shape == 1
False
>>> Shape.circle == 1
True
>>> Shape.circle == Request.post
True
However, they still can't be compared to standard ``Enum`` enumerations::
>>> class Shape(IntEnum):
... circle = 1
... square = 2
...
>>> class Color(Enum):
... red = 1
... green = 2
...
>>> Shape.circle == Color.red
False
``IntEnum`` values behave like integers in other ways you'd expect::
>>> int(Shape.circle)
1
>>> ['a', 'b', 'c'][Shape.circle]
'b'
>>> [i for i in range(Shape.square)]
[0, 1]
For the vast majority of code, ``Enum`` is strongly recommended,
since ``IntEnum`` breaks some semantic promises of an enumeration (by
being comparable to integers, and thus by transitivity to other
unrelated enumerations). It should be used only in special cases where
there's no other choice; for example, when integer constants are
replaced with enumerations and backwards compatibility is required with code
that still expects integers.
Others
^^^^^^
While ``IntEnum`` is part of the ``enum`` module, it would be very
simple to implement independently::
class IntEnum(int, Enum):
pass
This demonstrates how similar derived enumerations can be defined; for example
a ``StrEnum`` that mixes in ``str`` instead of ``int``.
Some rules:
1. When subclassing ``Enum``, mix-in types must appear before
``Enum`` itself in the sequence of bases, as in the ``IntEnum``
example above.
2. While ``Enum`` can have members of any type, once you mix in an
additional type, all the members must have values of that type, e.g.
``int`` above. This restriction does not apply to mix-ins which only
add methods and don't specify another data type such as ``int`` or
``str``.
3. When another data type is mixed in, the ``value`` attribute is *not the
same* as the enum member itself, although it is equivalent and will compare
equal.
4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
IntEnum) treat the enum member as its mixed-in type.
Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
or ``%u`` codes are used.
5. ``str.__format__`` (or ``format``) will use the mixed-in
type's ``__format__``. If the ``Enum``'s ``str`` or
``repr`` is desired use the ``!s`` or ``!r`` ``str`` format codes.
Decorators
----------
unique
^^^^^^
A ``class`` decorator specifically for enumerations. It searches an
enumeration's ``__members__`` gathering any aliases it finds; if any are
found ``ValueError`` is raised with the details::
>>> @unique
... class NoDupes(Enum):
... first = 'one'
... second = 'two'
... third = 'two'
Traceback (most recent call last):
...
ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
Interesting examples
--------------------
While ``Enum`` and ``IntEnum`` are expected to cover the majority of
use-cases, they cannot cover them all. Here are recipes for some different
types of enumerations that can be used directly, or as examples for creating
one's own.
AutoNumber
^^^^^^^^^^
Avoids having to specify the value for each enumeration member::
>>> class AutoNumber(Enum):
... def __new__(cls):
... value = len(cls.__members__) + 1
... obj = object.__new__(cls)
... obj._value_ = value
... return obj
...
>>> class Color(AutoNumber):
... __order__ = "red green blue" # only needed in 2.x
... red = ()
... green = ()
... blue = ()
...
>>> Color.green.value == 2
True
Note:
The `__new__` method, if defined, is used during creation of the Enum
members; it is then replaced by Enum's `__new__` which is used after
class creation for lookup of existing members. Due to the way Enums are
supposed to behave, there is no way to customize Enum's `__new__`.
UniqueEnum
^^^^^^^^^^
Raises an error if a duplicate member name is found instead of creating an
alias::
>>> class UniqueEnum(Enum):
... def __init__(self, *args):
... cls = self.__class__
... if any(self.value == e.value for e in cls):
... a = self.name
... e = cls(self.value).name
... raise ValueError(
... "aliases not allowed in UniqueEnum: %r --> %r"
... % (a, e))
...
>>> class Color(UniqueEnum):
... red = 1
... green = 2
... blue = 3
... grene = 2
Traceback (most recent call last):
...
ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
OrderedEnum
^^^^^^^^^^^
An ordered enumeration that is not based on ``IntEnum`` and so maintains
the normal ``Enum`` invariants (such as not being comparable to other
enumerations)::
>>> class OrderedEnum(Enum):
... def __ge__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ >= other._value_
... return NotImplemented
... def __gt__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ > other._value_
... return NotImplemented
... def __le__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ <= other._value_
... return NotImplemented
... def __lt__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ < other._value_
... return NotImplemented
...
>>> class Grade(OrderedEnum):
... __ordered__ = 'A B C D F'
... A = 5
... B = 4
... C = 3
... D = 2
... F = 1
...
>>> Grade.C < Grade.A
True
Planet
^^^^^^
If ``__new__`` or ``__init__`` is defined the value of the enum member
will be passed to those methods::
>>> class Planet(Enum):
... MERCURY = (3.303e+23, 2.4397e6)
... VENUS = (4.869e+24, 6.0518e6)
... EARTH = (5.976e+24, 6.37814e6)
... MARS = (6.421e+23, 3.3972e6)
... JUPITER = (1.9e+27, 7.1492e7)
... SATURN = (5.688e+26, 6.0268e7)
... URANUS = (8.686e+25, 2.5559e7)
... NEPTUNE = (1.024e+26, 2.4746e7)
... def __init__(self, mass, radius):
... self.mass = mass # in kilograms
... self.radius = radius # in meters
... @property
... def surface_gravity(self):
... # universal gravitational constant (m3 kg-1 s-2)
... G = 6.67300E-11
... return G * self.mass / (self.radius * self.radius)
...
>>> Planet.EARTH.value
(5.976e+24, 6378140.0)
>>> Planet.EARTH.surface_gravity
9.802652743337129
How are Enums different?
------------------------
Enums have a custom metaclass that affects many aspects of both derived Enum
classes and their instances (members).
Enum Classes
^^^^^^^^^^^^
The ``EnumMeta`` metaclass is responsible for providing the
``__contains__``, ``__dir__``, ``__iter__`` and other methods that
allow one to do things with an ``Enum`` class that fail on a typical
class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
responsible for ensuring that various other methods on the final ``Enum``
class are correct (such as ``__new__``, ``__getnewargs__``,
``__str__`` and ``__repr__``).
.. note::
``__dir__`` is not changed in the Python 2 line as it messes up some
of the decorators included in the stdlib.
Enum Members (aka instances)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The most interesting thing about Enum members is that they are singletons.
``EnumMeta`` creates them all while it is creating the ``Enum``
class itself, and then puts a custom ``__new__`` in place to ensure
that no new ones are ever instantiated by returning only the existing
member instances.
Finer Points
^^^^^^^^^^^^
``Enum`` members are instances of an ``Enum`` class, and even though they
are accessible as `EnumClass.member1.member2`, they should not be
accessed directly from the member as that lookup may fail or, worse,
return something besides the ``Enum`` member you were looking for
(changed in version 1.1.1)::
>>> class FieldTypes(Enum):
... name = 1
... value = 2
... size = 3
...
>>> FieldTypes.value.size
<FieldTypes.size: 3>
>>> FieldTypes.size.value
3
The ``__members__`` attribute is only available on the class.
In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
the definition order. In Python 2.7 ``__members__`` is an ``OrderedDict`` if
``__order__`` was specified, and a plain ``dict`` otherwise. In all other Python
2.x versions ``__members__`` is a plain ``dict`` even if ``__order__`` was specified
as the ``OrderedDict`` type didn't exist yet.
If you give your ``Enum`` subclass extra methods, like the `Planet`_
class above, those methods will show up in a `dir` of the member,
but not of the class::
>>> dir(Planet)
['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
'VENUS', '__class__', '__doc__', '__members__', '__module__']
>>> dir(Planet.EARTH)
['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
A ``__new__`` method will only be used for the creation of the
``Enum`` members -- after that it is replaced. This means if you wish to
change how ``Enum`` members are looked up you either have to write a
helper function or a ``classmethod``.

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,62 +0,0 @@
Metadata-Version: 1.1
Name: enum34
Version: 1.1.6
Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
Home-page: https://bitbucket.org/stoneleaf/enum34
Author: Ethan Furman
Author-email: ethan@stoneleaf.us
License: BSD License
Description: enum --- support for enumerations
========================================
An enumeration is a set of symbolic names (members) bound to unique, constant
values. Within an enumeration, the members can be compared by identity, and
the enumeration itself can be iterated over.
from enum import Enum
class Fruit(Enum):
apple = 1
banana = 2
orange = 3
list(Fruit)
# [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
len(Fruit)
# 3
Fruit.banana
# <Fruit.banana: 2>
Fruit['banana']
# <Fruit.banana: 2>
Fruit(2)
# <Fruit.banana: 2>
Fruit.banana is Fruit['banana'] is Fruit(2)
# True
Fruit.banana.name
# 'banana'
Fruit.banana.value
# 2
Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Provides: enum

Просмотреть файл

@ -1,13 +0,0 @@
MANIFEST.in
README
setup.py
enum/LICENSE
enum/README
enum/__init__.py
enum/test.py
enum/doc/enum.pdf
enum/doc/enum.rst
enum34.egg-info/PKG-INFO
enum34.egg-info/SOURCES.txt
enum34.egg-info/dependency_links.txt
enum34.egg-info/top_level.txt

Просмотреть файл

@ -1 +0,0 @@

Просмотреть файл

@ -1 +0,0 @@
enum

Просмотреть файл

@ -1,5 +0,0 @@
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

Просмотреть файл

@ -1,99 +0,0 @@
# Packaging script for enum34, the backport of the Python 3.4 ``enum``
# module to Python 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4.
import os
import sys
# NOTE(review): ``setuptools`` is not referenced by name below, but importing
# it patches distutils (enabling commands such as bdist_wheel and the
# ``install_requires`` keyword); ``os`` looks unused — confirm before removing.
import setuptools
from distutils.core import setup

# enum relies on an ordered mapping; Pythons before 2.7 lack
# collections.OrderedDict, so depend on the 'ordereddict' backport there.
if sys.version_info[:2] < (2, 7):
    required = ['ordereddict']
else:
    required = []

# Long description shown on the PyPI project page (reStructuredText).
long_desc = '''\
enum --- support for enumerations
========================================
An enumeration is a set of symbolic names (members) bound to unique, constant
values. Within an enumeration, the members can be compared by identity, and
the enumeration itself can be iterated over.
from enum import Enum
class Fruit(Enum):
apple = 1
banana = 2
orange = 3
list(Fruit)
# [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
len(Fruit)
# 3
Fruit.banana
# <Fruit.banana: 2>
Fruit['banana']
# <Fruit.banana: 2>
Fruit(2)
# <Fruit.banana: 2>
Fruit.banana is Fruit['banana'] is Fruit(2)
# True
Fruit.banana.name
# 'banana'
Fruit.banana.value
# 2
Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
'''

# Placeholders for version-specific artifacts (currently empty).
py2_only = ()
py3_only = ()

# Maintainer command(s) used to regenerate the bundled PDF documentation.
make = [
    'rst2pdf enum/doc/enum.rst --output=enum/doc/enum.pdf',
]

# Distribution metadata, passed straight to distutils' setup() below.
data = dict(
    name='enum34',
    version='1.1.6',
    url='https://bitbucket.org/stoneleaf/enum34',
    packages=['enum'],
    package_data={
        'enum' : [
            'LICENSE',
            'README',
            'doc/enum.rst',
            'doc/enum.pdf',
            'test.py',
        ]
    },
    license='BSD License',
    description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
    long_description=long_desc,
    provides=['enum'],
    install_requires=required,
    author='Ethan Furman',
    author_email='ethan@stoneleaf.us',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)

# Only run setup() when executed as a script, not when imported.
if __name__ == '__main__':
    setup(**data)

Просмотреть файл

@ -1,18 +0,0 @@
[run]
branch = True
source = h2
[report]
fail_under = 100
show_missing = True
exclude_lines =
pragma: no cover
.*:.* # Python \d.*
assert False, "Should not be reachable"
.*:.* # Platform-specific:
[paths]
source =
h2/
.tox/*/lib/python*/site-packages/h2
.tox/pypy*/site-packages/h2

Просмотреть файл

@ -1,11 +0,0 @@
build/
env/
dist/
*.egg-info/
*.pyc
__pycache__
.coverage
.tox/
.hypothesis/
.cache/
_trial_temp/

Просмотреть файл

@ -1,41 +0,0 @@
sudo: false
language: python
cache:
directories:
- $HOME/.cache/pip
- .hypothesis
matrix:
include:
# Main tests on supported Python versions.
- python: "2.7"
env: TOXENV=py27
- python: "3.3"
env: TOXENV=py33
- python: "3.4"
env: TOXENV=py34
- python: "3.5"
env: TOXENV=py35
- python: "3.6"
env: TOXENV=py36
- python: "pypy-5.3.1"
env: TOXENV=pypy
# Linting, docs, and other non-test stuff.
- python: "3.4"
env: TOXENV=lint
- python: "3.5"
env: TOXENV=docs
- python: "2.7"
env: TOXENV=packaging
# Test we haven't broken our major dependencies.
- python: "2.7"
env: TOXENV=py27-twistedMaster
install:
- "pip install -U pip setuptools"
- "pip install -U tox"
script:
- tox -- --hypothesis-profile travis

Просмотреть файл

@ -1,114 +0,0 @@
Hyper-h2 is written and maintained by Cory Benfield and various contributors:
Development Lead
````````````````
- Cory Benfield <cory@lukasa.co.uk>
Contributors
````````````
In chronological order:
- Robert Collins (@rbtcollins)
- Provided invaluable and substantial early input into API design and layout.
- Added code preventing ``Proxy-Authorization`` from getting added to HPACK
compression contexts.
- Maximilian Hils (@maximilianhils)
- Added asyncio example.
- Alex Chan (@alexwlchan)
- Fixed docstring, added URLs to README.
- Glyph Lefkowitz (@glyph)
- Improved example Twisted server.
- Thomas Kriechbaumer (@Kriechi)
- Fixed incorrect arguments being passed to ``StreamIDTooLowError``.
- Added new arguments to ``close_connection``.
- WeiZheng Xu (@boyxuper)
- Reported a bug relating to hyper-h2's updating of the connection window in
response to SETTINGS_INITIAL_WINDOW_SIZE.
- Evgeny Tataurov (@etataurov)
- Added the ``additional_data`` field to the ``ConnectionTerminated`` event.
- Brett Cannon (@brettcannon)
- Changed Travis status icon to SVG.
- Documentation improvements.
- Felix Yan (@felixonmars)
- Widened allowed version numbers of enum34.
- Updated test requirements.
- Keith Dart (@kdart)
- Fixed curio example server flow control problems.
- Gil Gonçalves (@LuRsT)
- Added code forbidding non-RFC 7540 pseudo-headers.
- Louis Taylor (@kragniz)
- Cleaned up the README
- Berker Peksag (@berkerpeksag)
- Improved the docstring for ``StreamIDTooLowError``.
- Adrian Lewis (@aidylewis)
- Fixed the broken Twisted HEAD request example.
- Added verification logic for ensuring that responses to HEAD requests have
no body.
- Lorenzo (@Mec-iS)
- Changed documentation to stop using dictionaries for header blocks.
- Kracekumar Ramaraj (@kracekumar)
- Cleaned up Twisted example.
- @mlvnd
- Cleaned up curio example.
- Tom Offermann (@toffer)
- Added Tornado example.
- Tarashish Mishra (@sunu)
- Added code to reject header fields with leading/trailing whitespace.
- Added code to remove leading/trailing whitespace from sent header fields.
- Nate Prewitt (@nateprewitt)
- Added code to validate that trailers do not contain pseudo-header fields.
- Chun-Han, Hsiao (@chhsiao90)
- Fixed a bug with invalid ``HTTP2-Settings`` header output in plaintext
upgrade.
- Bhavishya (@bhavishyagopesh)
- Added support for equality testing to ``h2.settings.Settings`` objects.
- Fred Thomsen (@fredthomsen)
- Added logging.

Просмотреть файл

@ -1,701 +0,0 @@
Release History
===============
3.0.1 (2017-04-03)
------------------
Bugfixes
~~~~~~~~
- CONTINUATION frames sent on closed streams previously caused stream errors
of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
errors of type PROTOCOL_ERROR, and so this release changes to match that
behaviour.
- Remote peers incrementing their inbound connection window beyond the maximum
allowed value now cause stream-level errors, rather than connection-level
errors, allowing connections to stay up longer.
- h2 now rejects receiving and sending request header blocks that are missing
any of the mandatory pseudo-header fields (:path, :scheme, and :method).
- h2 now rejects receiving and sending request header blocks that have an empty
:path pseudo-header value.
- h2 now rejects receiving and sending request header blocks that contain
response-only pseudo-headers, and vice versa.
- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
local setting, and ensures that if users shrink or increase the header
table size it is policed appropriately.
2.6.2 (2017-04-03)
------------------
Bugfixes
~~~~~~~~
- CONTINUATION frames sent on closed streams previously caused stream errors
of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
errors of type PROTOCOL_ERROR, and so this release changes to match that
behaviour.
- Remote peers incrementing their inbound connection window beyond the maximum
allowed value now cause stream-level errors, rather than connection-level
errors, allowing connections to stay up longer.
- h2 now rejects receiving and sending request header blocks that are missing
any of the mandatory pseudo-header fields (:path, :scheme, and :method).
- h2 now rejects receiving and sending request header blocks that have an empty
:path pseudo-header value.
- h2 now rejects receiving and sending request header blocks that contain
response-only pseudo-headers, and vice versa.
- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
local setting, and ensures that if users shrink or increase the header
table size it is policed appropriately.
2.5.4 (2017-04-03)
------------------
Bugfixes
~~~~~~~~
- CONTINUATION frames sent on closed streams previously caused stream errors
of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
errors of type PROTOCOL_ERROR, and so this release changes to match that
behaviour.
- Remote peers incrementing their inbound connection window beyond the maximum
allowed value now cause stream-level errors, rather than connection-level
errors, allowing connections to stay up longer.
- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
local setting, and ensures that if users shrink or increase the header
table size it is policed appropriately.
3.0.0 (2017-03-24)
------------------
API Changes (Backward-Incompatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- By default, hyper-h2 now joins together received cookie header fields, per
RFC 7540 Section 8.1.2.5.
- Added a ``normalize_inbound_headers`` flag to the ``H2Configuration`` object
that defaults to ``True``. Setting this to ``False`` changes the behaviour
from the previous point back to the v2 behaviour.
- Removed deprecated fields from ``h2.errors`` module.
- Removed deprecated fields from ``h2.settings`` module.
- Removed deprecated ``client_side`` and ``header_encoding`` arguments from
``H2Connection``.
- Removed deprecated ``client_side`` and ``header_encoding`` properties from
``H2Connection``.
- ``dict`` objects are no longer allowed for user-supplied headers.
- The default header encoding is now ``None``, not ``utf-8``: this means that
all events that carry headers now return those headers as byte strings by
default. The header encoding can be set back to ``utf-8`` to restore the old
behaviour.
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added new ``UnknownFrameReceived`` event that fires when unknown extension
frames have been received. This only fires when using hyperframe 5.0 or
later: earlier versions of hyperframe cause us to silently ignore extension
frames.
Bugfixes
~~~~~~~~
None
2.6.1 (2017-03-16)
------------------
Bugfixes
~~~~~~~~
- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
2.5.3 (2017-03-16)
------------------
Bugfixes
~~~~~~~~
- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
2.4.4 (2017-03-16)
------------------
Bugfixes
~~~~~~~~
- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
2.6.0 (2017-02-28)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added a new ``h2.events.Event`` class that acts as a base class for all
events.
- Rather than reject outbound Connection-specific headers, h2 will now
normalize the header block by removing them.
- Implement equality for the ``h2.settings.Settings`` class.
- Added ``h2.settings.SettingCodes``, an enum that is used to store all the
HTTP/2 setting codes. This allows us to use a better printed representation of
the setting code in most places that it is used.
- The ``setting`` field in ``ChangedSetting`` for the ``RemoteSettingsChanged``
and ``SettingsAcknowledged`` events has been updated to be instances of
``SettingCodes`` whenever they correspond to a known setting code. When they
are an unknown setting code, they are instead ``int``. As ``SettingCodes`` is
a subclass of ``int``, this is non-breaking.
- Deprecated the other fields in ``h2.settings``. These will be removed in
3.0.0.
- Added an optional ``pad_length`` parameter to ``H2Connection.send_data``
to allow the user to include padding on a data frame.
- Added a new parameter to the ``h2.config.H2Configuration`` initializer which
takes a logger. This allows us to log by providing a logger that conforms
to the requirements of this module so that it can be used in different
environments.
Bugfixes
~~~~~~~~
- Correctly reject pushed request header blocks whenever they have malformed
request header blocks.
- Correctly normalize pushed request header blocks whenever they have
normalizable header fields.
- Remote peers are now allowed to send zero or any positive number as a value
for ``SETTINGS_MAX_HEADER_LIST_SIZE``, where previously sending zero would
raise an ``InvalidSettingsValueError``.
- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
upgrade that was emitted by ``initiate_upgrade_connection`` included the
*entire* ``SETTINGS`` frame, instead of just the payload.
- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
than have those settings applied appropriately.
- Resolved an issue whereby certain frames received from a peer in the CLOSED
state would trigger connection errors when RFC 7540 says they should have
triggered stream errors instead. Added more detailed stream closure tracking
to ensure we don't throw away connections unnecessarily.
2.5.2 (2017-01-27)
------------------
- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
upgrade that was emitted by ``initiate_upgrade_connection`` included the
*entire* ``SETTINGS`` frame, instead of just the payload.
- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
than have those settings applied appropriately.
2.4.3 (2017-01-27)
------------------
- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
upgrade that was emitted by ``initiate_upgrade_connection`` included the
*entire* ``SETTINGS`` frame, instead of just the payload.
- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
than have those settings applied appropriately.
2.3.4 (2017-01-27)
------------------
- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
upgrade that was emitted by ``initiate_upgrade_connection`` included the
*entire* ``SETTINGS`` frame, instead of just the payload.
- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
than have those settings applied appropriately.
2.5.1 (2016-12-17)
------------------
Bugfixes
~~~~~~~~
- Remote peers are now allowed to send zero or any positive number as a value
for ``SETTINGS_MAX_HEADER_LIST_SIZE``, where previously sending zero would
raise an ``InvalidSettingsValueError``.
2.5.0 (2016-10-25)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added a new ``H2Configuration`` object that allows rich configuration of
a ``H2Connection``. This object supersedes the prior keyword arguments to the
``H2Connection`` object, which are now deprecated and will be removed in 3.0.
- Added support for automated window management via the
``acknowledge_received_data`` method. See the documentation for more details.
- Added a ``DenialOfServiceError`` that is raised whenever a behaviour that
looks like a DoS attempt is encountered: for example, an overly large
decompressed header list. This is a subclass of ``ProtocolError``.
- Added support for setting and managing ``SETTINGS_MAX_HEADER_LIST_SIZE``.
This setting is now defaulted to 64kB.
- Added ``h2.errors.ErrorCodes``, an enum that is used to store all the HTTP/2
error codes. This allows us to use a better printed representation of the
error code in most places that it is used.
- The ``error_code`` fields on ``ConnectionTerminated`` and ``StreamReset``
events have been updated to be instances of ``ErrorCodes`` whenever they
correspond to a known error code. When they are an unknown error code, they
are instead ``int``. As ``ErrorCodes`` is a subclass of ``int``, this is
non-breaking.
- Deprecated the other fields in ``h2.errors``. These will be removed in 3.0.0.
Bugfixes
~~~~~~~~
- Correctly reject request header blocks with neither :authority nor Host
headers, or header blocks which contain mismatched :authority and Host
headers, per RFC 7540 Section 8.1.2.3.
- Correctly expect that responses to HEAD requests will have no body regardless
of the value of the Content-Length header, and reject those that do.
- Correctly refuse to send header blocks that contain neither :authority nor
Host headers, or header blocks which contain mismatched :authority and Host
headers, per RFC 7540 Section 8.1.2.3.
- Hyper-h2 will now reject header field names and values that contain leading
or trailing whitespace.
- Correctly strip leading/trailing whitespace from header field names and
values.
- Correctly refuse to send header blocks with a TE header whose value is not
``trailers``, per RFC 7540 Section 8.1.2.2.
- Correctly refuse to send header blocks with connection-specific headers,
per RFC 7540 Section 8.1.2.2.
- Correctly refuse to send header blocks that contain duplicate pseudo-header
fields, or with pseudo-header fields that appear after ordinary header fields,
per RFC 7540 Section 8.1.2.1.
This may cause passing a dictionary as the header block to ``send_headers``
to throw a ``ProtocolError``, because dictionaries are unordered and so they
may trip this check. Passing dictionaries here is deprecated, and callers
should change to using a sequence of 2-tuples as their header blocks.
- Correctly reject trailers that contain HTTP/2 pseudo-header fields, per RFC
7540 Section 8.1.2.1.
- Correctly refuse to send trailers that contain HTTP/2 pseudo-header fields,
per RFC 7540 Section 8.1.2.1.
- Correctly reject responses that do not contain the ``:status`` header field,
per RFC 7540 Section 8.1.2.4.
- Correctly refuse to send responses that do not contain the ``:status`` header
field, per RFC 7540 Section 8.1.2.4.
- Correctly update the maximum frame size when the user updates the value of
that setting. Prior to this release, if the user updated the maximum frame
size hyper-h2 would ignore the update, preventing the remote peer from using
the higher frame sizes.
2.4.2 (2016-10-25)
------------------
Bugfixes
~~~~~~~~
- Correctly update the maximum frame size when the user updates the value of
that setting. Prior to this release, if the user updated the maximum frame
size hyper-h2 would ignore the update, preventing the remote peer from using
the higher frame sizes.
2.3.3 (2016-10-25)
------------------
Bugfixes
~~~~~~~~
- Correctly update the maximum frame size when the user updates the value of
that setting. Prior to this release, if the user updated the maximum frame
size hyper-h2 would ignore the update, preventing the remote peer from using
the higher frame sizes.
2.2.7 (2016-10-25)
------------------
*Final 2.2.X release*
Bugfixes
~~~~~~~~
- Correctly update the maximum frame size when the user updates the value of
that setting. Prior to this release, if the user updated the maximum frame
size hyper-h2 would ignore the update, preventing the remote peer from using
the higher frame sizes.
2.4.1 (2016-08-23)
------------------
Bugfixes
~~~~~~~~
- Correctly expect that responses to HEAD requests will have no body regardless
of the value of the Content-Length header, and reject those that do.
2.3.2 (2016-08-23)
------------------
Bugfixes
~~~~~~~~
- Correctly expect that responses to HEAD requests will have no body regardless
of the value of the Content-Length header, and reject those that do.
2.4.0 (2016-07-01)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Adds ``additional_data`` to ``H2Connection.close_connection``, allowing the
user to send additional debug data on the GOAWAY frame.
- Adds ``last_stream_id`` to ``H2Connection.close_connection``, allowing the
user to manually control what the reported last stream ID is.
- Add new method: ``prioritize``.
- Add support for emitting stream priority information when sending headers
frames using three new keyword arguments: ``priority_weight``,
``priority_depends_on``, and ``priority_exclusive``.
- Add support for "related events": events that fire simultaneously on a single
frame.
2.3.1 (2016-05-12)
------------------
Bugfixes
~~~~~~~~
- Resolved ``AttributeError`` encountered when receiving more than one sequence
of CONTINUATION frames on a given connection.
2.2.5 (2016-05-12)
------------------
Bugfixes
~~~~~~~~
- Resolved ``AttributeError`` encountered when receiving more than one sequence
of CONTINUATION frames on a given connection.
2.3.0 (2016-04-26)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added a new flag to the ``H2Connection`` constructor: ``header_encoding``,
that controls what encoding is used (if any) to decode the headers from bytes
to unicode. This defaults to UTF-8 for backward compatibility. To disable the
decode and use bytes exclusively, set the field to False, None, or the empty
string. This affects all headers, including those pushed by servers.
- Bumped the minimum version of HPACK allowed from 2.0 to 2.2.
- Added support for advertising RFC 7838 Alternative services.
- Allowed users to provide ``hpack.HeaderTuple`` and
``hpack.NeverIndexedHeaderTuple`` objects to all methods that send headers.
- Changed all events that carry headers to emit ``hpack.HeaderTuple`` and
``hpack.NeverIndexedHeaderTuple`` instead of plain tuples. This allows users
to maintain header indexing state.
- Added support for plaintext upgrade with the ``initiate_upgrade_connection``
method.
Bugfixes
~~~~~~~~
- Automatically ensure that all ``Authorization`` and ``Proxy-Authorization``
headers, as well as short ``Cookie`` headers, are prevented from being added
to encoding contexts.
2.2.4 (2016-04-25)
------------------
Bugfixes
~~~~~~~~
- Correctly forbid pseudo-headers that were not defined in RFC 7540.
- Ignore AltSvc frames, rather than exploding when receiving them.
2.1.5 (2016-04-25)
------------------
*Final 2.1.X release*
Bugfixes
~~~~~~~~
- Correctly forbid pseudo-headers that were not defined in RFC 7540.
- Ignore AltSvc frames, rather than exploding when receiving them.
2.2.3 (2016-04-13)
------------------
Bugfixes
~~~~~~~~
- Allowed the 4.X series of hyperframe releases as dependencies.
2.1.4 (2016-04-13)
------------------
Bugfixes
~~~~~~~~
- Allowed the 4.X series of hyperframe releases as dependencies.
2.2.2 (2016-04-05)
------------------
Bugfixes
~~~~~~~~
- Fixed issue where informational responses were erroneously not allowed to be
sent in the ``HALF_CLOSED_REMOTE`` state.
- Fixed issue where informational responses were erroneously not allowed to be
received in the ``HALF_CLOSED_LOCAL`` state.
- Fixed issue where we allowed information responses to be sent or received
after final responses.
2.2.1 (2016-03-23)
------------------
Bugfixes
~~~~~~~~
- Fixed issue where users using locales that did not default to UTF-8 were
unable to install source distributions of the package.
2.2.0 (2016-03-23)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added support for sending informational responses (responses with 1XX status)
codes as part of the standard flow. HTTP/2 allows zero or more informational
responses with no upper limit: hyper-h2 does too.
- Added support for receiving informational responses (responses with 1XX
status) codes as part of the standard flow. HTTP/2 allows zero or more
informational responses with no upper limit: hyper-h2 does too.
- Added a new event: ``ReceivedInformationalResponse``. This event is fired
when informational responses (those with 1XX status codes) are received.
- Added an ``additional_data`` field to the ``ConnectionTerminated`` event that
carries any additional data sent on the GOAWAY frame. May be ``None`` if no
such data was sent.
- Added the ``initial_values`` optional argument to the ``Settings`` object.
Bugfixes
~~~~~~~~
- Correctly reject all of the connection-specific headers mentioned in RFC 7540
§ 8.1.2.2, not just the ``Connection:`` header.
- Defaulted the value of ``SETTINGS_MAX_CONCURRENT_STREAMS`` to 100, unless
explicitly overridden. This is a safe defensive initial value for this
setting.
2.1.3 (2016-03-16)
------------------
Deprecations
~~~~~~~~~~~~
- Passing dictionaries to ``send_headers`` as the header block is deprecated,
and will be removed in 3.0.
2.1.2 (2016-02-17)
------------------
Bugfixes
~~~~~~~~
- Reject attempts to push streams on streams that were themselves pushed:
streams can only be pushed on streams that were initiated by the client.
- Correctly allow CONTINUATION frames to extend the header block started by a
PUSH_PROMISE frame.
- Changed our handling of frames received on streams that were reset by the
user.
Previously these would, at best, cause ProtocolErrors to be raised and the
connection to be torn down (rather defeating the point of resetting streams
at all) and, at worst, would cause subtle inconsistencies in state between
hyper-h2 and the remote peer that could lead to header block decoding errors
or flow control blockages.
Now when the user resets a stream all further frames received on that stream
are ignored except where they affect some form of connection-level state,
where they have their effect and are then ignored.
- Fixed a bug whereby receiving a PUSH_PROMISE frame on a stream that was
closed would cause a RST_STREAM frame to be emitted on the closed-stream,
but not the newly-pushed one. Now this causes a ``ProtocolError``.
2.1.1 (2016-02-05)
------------------
Bugfixes
~~~~~~~~
- Added debug representations for all events.
- Fixed problems with setup.py that caused trouble on older setuptools/pip
installs.
2.1.0 (2016-02-02)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added new field to ``DataReceived``: ``flow_controlled_length``. This is the
length of the frame including padded data, allowing users to correctly track
changes to the flow control window.
- Defined new ``UnsupportedFrameError``, thrown when frames that are known to
hyperframe but not supported by hyper-h2 are received. For
backward-compatibility reasons, this is a ``ProtocolError`` *and* a
``KeyError``.
Bugfixes
~~~~~~~~
- Hyper-h2 now correctly accounts for padding when maintaining flow control
windows.
- Resolved a bug where hyper-h2 would mistakenly apply
SETTINGS_INITIAL_WINDOW_SIZE to the connection flow control window in
addition to the stream-level flow control windows.
- Invalid Content-Length headers now throw ``ProtocolError`` exceptions and
correctly tear the connection down, instead of leaving the connection in an
indeterminate state.
- Invalid header blocks now throw ``ProtocolError``, rather than a grab bag of
possible other exceptions.
2.0.0 (2016-01-25)
------------------
API Changes (Breaking)
~~~~~~~~~~~~~~~~~~~~~~
- Attempts to open streams with invalid stream IDs, either by the remote peer
or by the user, are now rejected as a ``ProtocolError``. Previously these
were allowed, and would cause remote peers to error.
- Receiving frames that have invalid padding now causes the connection to be
terminated with a ``ProtocolError`` being raised. Previously these passed
undetected.
- Settings values set by both the user and the remote peer are now validated
when they're set. If they're invalid, a new ``InvalidSettingsValueError`` is
raised and, if set by the remote peer, a connection error is signaled.
Previously, it was possible to set invalid values. These would either be
caught when building frames, or would be allowed to stand.
- Settings changes no longer require user action to be acknowledged: hyper-h2
acknowledges them automatically. This moves the location where some
exceptions may be thrown, and also causes the ``acknowledge_settings`` method
to be removed from the public API.
- Removed a number of methods on the ``H2Connection`` object from the public,
semantically versioned API, by renaming them to have leading underscores.
Specifically, removed:
- ``get_stream_by_id``
- ``get_or_create_stream``
- ``begin_new_stream``
- ``receive_frame``
- ``acknowledge_settings``
- Added full support for receiving CONTINUATION frames, including policing
logic about when and how they are received. Previously, receiving
CONTINUATION frames was not supported and would throw exceptions.
- All public API functions on ``H2Connection`` except for ``receive_data`` no
longer return lists of events, because these lists were always empty. Events
are now only raised by ``receive_data``.
- Calls to ``increment_flow_control_window`` with out of range values now raise
``ValueError`` exceptions. Previously they would be allowed, or would cause
errors when serializing frames.
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added ``PriorityUpdated`` event for signaling priority changes.
- Added ``get_next_available_stream_id`` function.
- Receiving DATA frames on streams not in the OPEN or HALF_CLOSED_LOCAL states
now causes a stream reset, rather than a connection reset. The error is now
also classified as a ``StreamClosedError``, rather than a more generic
``ProtocolError``.
- Receiving HEADERS or PUSH_PROMISE frames in the HALF_CLOSED_REMOTE state now
causes a stream reset, rather than a connection reset.
- Receiving frames that violate the max frame size now causes connection errors
with error code FRAME_SIZE_ERROR, not a generic PROTOCOL_ERROR. This
condition now also raises a ``FrameTooLargeError``, a new subclass of
``ProtocolError``.
- Made ``NoSuchStreamError`` a subclass of ``ProtocolError``.
- The ``StreamReset`` event is now also fired whenever a protocol error from
the remote peer forces a stream to close early. This is only fired once.
- The ``StreamReset`` event now carries a flag, ``remote_reset``, that is set
to ``True`` in all cases where ``StreamReset`` would previously have fired
(e.g. when the remote peer sent a RST_STREAM), and is set to ``False`` when
it fires because the remote peer made a protocol error.
- Hyper-h2 now rejects attempts by peers to increment a flow control window by
zero bytes.
- Hyper-h2 now rejects peers sending header blocks that are ill-formed for a
number of reasons as set out in RFC 7540 Section 8.1.2.
- Attempting to send non-PRIORITY frames on closed streams now raises
``StreamClosedError``.
- Remote peers attempting to increase the flow control window beyond
``2**31 - 1``, either by window increment or by settings frame, are now
rejected as ``ProtocolError``.
- Local attempts to increase the flow control window beyond ``2**31 - 1`` by
window increment are now rejected as ``ProtocolError``.
- The bytes that represent individual settings are now available in
``h2.settings``, instead of needing users to import them from hyperframe.
Bugfixes
~~~~~~~~
- RFC 7540 requires that a separate minimum stream ID be used for inbound and
outbound streams. Hyper-h2 now obeys this requirement.
- Hyper-h2 now does a better job of reporting the last stream ID it has
partially handled when terminating connections.
- Fixed an error in the arguments of ``StreamIDTooLowError``.
- Prevent ``ValueError`` leaking from Hyperframe.
- Prevent ``struct.error`` and ``InvalidFrameError`` leaking from Hyperframe.
1.1.1 (2015-11-17)
------------------
Bugfixes
~~~~~~~~
- Forcibly lowercase all header names to improve compatibility with
implementations that demand lower-case header names.
1.1.0 (2015-10-28)
------------------
API Changes (Backward-Compatible)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Added a new ``ConnectionTerminated`` event, which fires when GOAWAY frames
are received.
- Added a subclass of ``NoSuchStreamError``, called ``StreamClosedError``, that
fires when actions are taken on a stream that is closed and has had its state
flushed from the system.
- Added ``StreamIDTooLowError``, raised when the user or the remote peer
attempts to create a stream with an ID lower than one previously used in the
dialog. Inherits from ``ValueError`` for backward-compatibility reasons.
Bugfixes
~~~~~~~~
- Do not throw ``ProtocolError`` when attempting to send multiple GOAWAY
frames on one connection.
- We no longer forcefully change the decoder table size when settings changes
are ACKed, instead waiting for remote acknowledgement of the change.
- Improve the performance of checking whether a stream is open.
- We now attempt to lazily garbage collect closed streams, to avoid having the
state hang around indefinitely, leaking memory.
- Avoid further per-stream allocations, leading to substantial performance
improvements when many short-lived streams are used.
1.0.0 (2015-10-15)
------------------
- First production release!

Просмотреть файл

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015-2016 Cory Benfield and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

Просмотреть файл

@ -1,7 +0,0 @@
include README.rst LICENSE CONTRIBUTORS.rst HISTORY.rst tox.ini test_requirements.txt .coveragerc Makefile
recursive-include test *.py
graft docs
prune docs/build
graft visualizer
recursive-include examples *.py *.crt *.key *.pem *.csr
recursive-include utils *.sh

Просмотреть файл

@ -1,9 +0,0 @@
.PHONY: publish test
publish:
rm -rf dist/
python setup.py sdist bdist_wheel
twine upload -s dist/*
test:
py.test -n 4 --cov h2 test/

Просмотреть файл

@ -1,65 +0,0 @@
===============================
hyper-h2: HTTP/2 Protocol Stack
===============================
.. image:: https://raw.github.com/Lukasa/hyper/development/docs/source/images/hyper.png
.. image:: https://travis-ci.org/python-hyper/hyper-h2.svg?branch=master
:target: https://travis-ci.org/python-hyper/hyper-h2
This repository contains a pure-Python implementation of a HTTP/2 protocol
stack. It's written from the ground up to be embeddable in whatever program you
choose to use, ensuring that you can speak HTTP/2 regardless of your
programming paradigm.
You use it like this:
.. code-block:: python
import h2.connection
conn = h2.connection.H2Connection()
conn.send_headers(stream_id=stream_id, headers=headers)
conn.send_data(stream_id, data)
socket.sendall(conn.data_to_send())
events = conn.receive_data(socket_data)
This repository does not provide a parsing layer, a network layer, or any rules
about concurrency. Instead, it's a purely in-memory solution, defined in terms
of data actions and HTTP/2 frames. This is one building block of a full Python
HTTP implementation.
To install it, just run:
.. code-block:: console
$ pip install h2
Documentation
=============
Documentation is available at http://python-hyper.org/h2/.
Contributing
============
``hyper-h2`` welcomes contributions from anyone! Unlike many other projects we
are happy to accept cosmetic contributions and small contributions, in addition
to large feature requests and changes.
Before you contribute (either by opening an issue or filing a pull request),
please `read the contribution guidelines`_.
.. _read the contribution guidelines: http://python-hyper.org/en/latest/contributing.html
License
=======
``hyper-h2`` is made available under the MIT License. For more details, see the
``LICENSE`` file in the repository.
Authors
=======
``hyper-h2`` is maintained by Cory Benfield, with contributions from others. For
more details about the contributors, please see ``CONTRIBUTORS.rst``.

Просмотреть файл

@ -1,177 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/hyper-h2.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/hyper-h2.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/hyper-h2"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/hyper-h2"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

Просмотреть файл

@ -1,242 +0,0 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview of all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\hyper-h2.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\hyper-h2.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end

Просмотреть файл

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 698 KiB

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 1.8 MiB

Просмотреть файл

@ -1,325 +0,0 @@
Advanced Usage
==============
Priority
--------
.. versionadded:: 2.0.0
`RFC 7540`_ has a fairly substantial and complex section describing how to
build a HTTP/2 priority tree, and the effect that should have on sending data
from a server.
Hyper-h2 does not enforce any priority logic by default for servers. This is
because scheduling data sends is outside the scope of this library, as it
likely requires fairly substantial understanding of the scheduler being used.
However, for servers that *do* want to follow the priority recommendations
given by clients, the Hyper project provides `an implementation`_ of the
`RFC 7540`_ priority tree that will be useful to plug into a server. That,
combined with the :class:`PriorityUpdated <h2.events.PriorityUpdated>` event from
this library, can be used to build a server that conforms to RFC 7540's
recommendations for priority handling.
Related Events
--------------
.. versionadded:: 2.4.0
In the 2.4.0 release hyper-h2 added support for signaling "related events".
These are a HTTP/2-only construct that exist because certain HTTP/2 events can
occur simultaneously: that is, one HTTP/2 frame can cause multiple state
transitions to occur at the same time. One example of this is a HEADERS frame
that contains priority information and carries the END_STREAM flag: this would
cause three events to fire (one of the various request/response received
events, a :class:`PriorityUpdated <h2.events.PriorityUpdated>` event, and a
:class:`StreamEnded <h2.events.StreamEnded>` event).
Ordinarily hyper-h2's logic will emit those events to you one at a time. This
means that you may attempt to process, for example, a
:class:`DataReceived <h2.events.DataReceived>` event, not knowing that the next
event out will be a :class:`StreamEnded <h2.events.StreamEnded>` event.
hyper-h2 *does* know this, however, and so will forbid you from taking certain
actions that are a violation of the HTTP/2 protocol.
To avoid this asymmetry of information, events that can occur simultaneously
now carry properties for their "related events". These allow users to find the
events that can have occurred simultaneously with each other before the event
is emitted by hyper-h2. The following objects have "related events":
- :class:`RequestReceived <h2.events.RequestReceived>`:
- :data:`stream_ended <h2.events.RequestReceived.stream_ended>`: any
:class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
same time as receiving this request.
- :data:`priority_updated
<h2.events.RequestReceived.priority_updated>`: any
:class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
at the same time as receiving this request.
- :class:`ResponseReceived <h2.events.ResponseReceived>`:
- :data:`stream_ended <h2.events.ResponseReceived.stream_ended>`: any
:class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
same time as receiving this response.
- :data:`priority_updated
<h2.events.ResponseReceived.priority_updated>`: any
:class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
at the same time as receiving this response.
- :class:`TrailersReceived <h2.events.TrailersReceived>`:
- :data:`stream_ended <h2.events.TrailersReceived.stream_ended>`: any
:class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
same time as receiving this set of trailers. This will **always** be
present for trailers, as they must terminate streams.
- :data:`priority_updated
<h2.events.TrailersReceived.priority_updated>`: any
:class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
at the same time as receiving this response.
- :class:`InformationalResponseReceived
<h2.events.InformationalResponseReceived>`:
- :data:`priority_updated
<h2.events.InformationalResponseReceived.priority_updated>`: any
:class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
at the same time as receiving this informational response.
- :class:`DataReceived <h2.events.DataReceived>`:
- :data:`stream_ended <h2.events.DataReceived.stream_ended>`: any
:class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
same time as receiving this data.
.. warning:: hyper-h2 does not know if you are looking for related events or
expecting to find events in the event stream. Therefore, it will
always emit "related events" in the event stream. If you are using
the "related events" event pattern, you will want to be careful to
avoid double-processing related events.
.. _h2-connection-advanced:
Connections: Advanced
---------------------
Thread Safety
~~~~~~~~~~~~~
``H2Connection`` objects are *not* thread-safe. They cannot safely be accessed
from multiple threads at once. This is a deliberate design decision: it is not
trivially possible to design the ``H2Connection`` object in a way that would
be either lock-free or have the locks at a fine granularity.
Your implementations should bear this in mind, and handle it appropriately. It
should be simple enough to use locking alongside the ``H2Connection``: simply
lock around the connection object itself. Because the ``H2Connection`` object
does no I/O it should be entirely safe to do that. Alternatively, have a single
thread take ownership of the ``H2Connection`` and use a message-passing
interface to serialize access to the ``H2Connection``.
If you are using a non-threaded concurrency approach (e.g. Twisted), this
should not affect you.
Internal Buffers
~~~~~~~~~~~~~~~~
In order to avoid doing I/O, the ``H2Connection`` employs an internal buffer.
This buffer is *unbounded* in size: it can potentially grow infinitely. This
means that, if you are not making sure to regularly empty it, you are at risk
of exceeding the memory limit of a single process and finding your program
crashes.
It is highly recommended that you send data at regular intervals, ideally as
soon as possible.
.. _advanced-sending-data:
Sending Data
~~~~~~~~~~~~
When sending data on the network, it's important to remember that you may not
be able to send an unbounded amount of data at once. Particularly when using
TCP, it is often the case that there are limits on how much data may be in
flight at any one time. These limits can be very low, and your operating system
will only buffer so much data in memory before it starts to complain.
For this reason, it is possible to consume only a subset of the data available
when you call :meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
However, once you have pulled the data out of the ``H2Connection`` internal
buffer, it is *not* possible to put it back on again. For that reason, it is
advisable that you confirm how much space is available in the OS buffer before
sending.
Alternatively, use tools made available by your framework. For example, the
Python standard library :mod:`socket <python:socket>` module provides a
:meth:`sendall <python:socket.socket.sendall>` method that will automatically
block until all the data has been sent. This will enable you to always use the
unbounded form of
:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`, and will help
you avoid subtle bugs.
When To Send
~~~~~~~~~~~~
In addition to knowing how much data to send (see :ref:`advanced-sending-data`)
it is important to know when to send data. For hyper-h2, this amounts to
knowing when to call :meth:`data_to_send
<h2.connection.H2Connection.data_to_send>`.
Hyper-h2 may write data into its send buffer at two times. The first is
whenever :meth:`receive_data <h2.connection.H2Connection.receive_data>` is
called. This data is sent in response to some control frames that require no
user input: for example, responding to PING frames. The second time is in
response to user action: whenever a user calls a method like
:meth:`send_headers <h2.connection.H2Connection.send_headers>`, data may be
written into the buffer.
In a standard design for a hyper-h2 consumer, then, that means there are two
places where you'll potentially want to send data. The first is in your
"receive data" loop. This is where you take the data you receive, pass it into
:meth:`receive_data <h2.connection.H2Connection.receive_data>`, and then
dispatch events. For this loop, it is usually best to save sending data until
the loop is complete: that allows you to empty the buffer only once.
The other place you'll want to send the data is when initiating requests or
taking any other active, unprompted action on the connection. In this instance,
you'll want to make all the relevant ``send_*`` calls, and *then* call
:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
Headers
-------
HTTP/2 defines several "special header fields" which are used to encode data
that was previously sent in either the request or status line of HTTP/1.1.
These header fields are distinguished from ordinary header fields because their
field name begins with a ``:`` character. The special header fields defined in
`RFC 7540`_ are:
- ``:status``
- ``:path``
- ``:method``
- ``:scheme``
- ``:authority``
`RFC 7540`_ **mandates** that all of these header fields appear *first* in the
header block, before the ordinary header fields. This could cause difficulty if
the :meth:`send_headers <h2.connection.H2Connection.send_headers>` method
accepted a plain ``dict`` for the ``headers`` argument, because ``dict``
objects are unordered. For this reason, we require that you provide a list of
two-tuples.
.. _RFC 7540: https://tools.ietf.org/html/rfc7540
.. _an implementation: http://python-hyper.org/projects/priority/en/latest/
Flow Control
------------
HTTP/2 defines a complex flow control system that uses a sliding window of
data on both a per-stream and per-connection basis. Essentially, each
implementation allows its peer to send a specific amount of data at any time
(the "flow control window") before it must stop. Each stream has a separate
window, and the connection as a whole has a window. Each window can be opened
by an implementation by sending a ``WINDOW_UPDATE`` frame, either on a specific
stream (causing the window for that stream to be opened), or on stream ``0``,
which causes the window for the entire connection to be opened.
In HTTP/2, only data in ``DATA`` frames is flow controlled. All other frames
are exempt from flow control. Each ``DATA`` frame consumes both stream and
connection flow control window bytes. This means that the maximum amount of
data that can be sent on any one stream before a ``WINDOW_UPDATE`` frame is
received is the *lower* of the stream and connection windows. The maximum
amount of data that can be sent on *all* streams before a ``WINDOW_UPDATE``
frame is received is the size of the connection flow control window.
Working With Flow Control
~~~~~~~~~~~~~~~~~~~~~~~~~
The amount of flow control window a ``DATA`` frame consumes is the sum of both
its contained application data *and* the amount of padding used. hyper-h2 shows
this to the user in a :class:`DataReceived <h2.events.DataReceived>` event by
using the :data:`flow_controlled_length
<h2.events.DataReceived.flow_controlled_length>` field. When working with flow
control in hyper-h2, users *must* use this field: simply using
``len(datareceived.data)`` can eventually lead to deadlock.
When data has been received and given to the user in a :class:`DataReceived
<h2.events.DataReceived>`, it is the responsibility of the user to re-open the
flow control window when the user is ready for more data. hyper-h2 does not do
this automatically to avoid flooding the user with data: if we did, the remote
peer could send unbounded amounts of data that the user would need to buffer
before processing.
To re-open the flow control window, then, the user must call
:meth:`increment_flow_control_window
<h2.connection.H2Connection.increment_flow_control_window>` with the
:data:`flow_controlled_length <h2.events.DataReceived.flow_controlled_length>`
of the received data. hyper-h2 requires that you manage both the connection
and the stream flow control windows separately, so you may need to increment
both the stream the data was received on and stream ``0``.
When sending data, a HTTP/2 implementation must not send more than the flow
control window available for that stream. As noted above, the maximum amount of data
that can be sent on the stream is the minimum of the stream and the connection
flow control windows. You can find out how much data you can send on a given
stream by using the :meth:`local_flow_control_window
<h2.connection.H2Connection.local_flow_control_window>` method, which will do
all of these calculations for you. If you attempt to send more than this amount
of data on a stream, hyper-h2 will throw a :class:`ProtocolError
<h2.exceptions.ProtocolError>` and refuse to send the data.
In hyper-h2, receiving a ``WINDOW_UPDATE`` frame causes a :class:`WindowUpdated
<h2.events.WindowUpdated>` event to fire. This will notify you that there is
potentially more room in a flow control window. Note that, just because an
increment of a given size was received *does not* mean that that much more data
can be sent: remember that both the connection and stream flow control windows
constrain how much data can be sent.
As a result, when a :class:`WindowUpdated <h2.events.WindowUpdated>` event
fires with a non-zero stream ID, and the user has more data to send on that
stream, the user should call :meth:`local_flow_control_window
<h2.connection.H2Connection.local_flow_control_window>` to check if there
really is more room to send data on that stream.
When a :class:`WindowUpdated <h2.events.WindowUpdated>` event fires with a
stream ID of ``0``, that may have unblocked *all* streams that are currently
blocked. The user should use :meth:`local_flow_control_window
<h2.connection.H2Connection.local_flow_control_window>` to check all blocked
streams to see if more data is available.
Auto Flow Control
~~~~~~~~~~~~~~~~~
.. versionadded:: 2.5.0
In most cases, there is no advantage for users in managing their own flow
control strategies. While particular high performance or specific-use-case
applications may gain value from directly controlling the emission of
``WINDOW_UPDATE`` frames, the average application can use a
lowest-common-denominator strategy to emit those frames. As of version 2.5.0,
hyper-h2 now provides this automatic strategy for users, if they want to use
it.
This automatic strategy is built around a single method:
:meth:`acknowledge_received_data
<h2.connection.H2Connection.acknowledge_received_data>`. This method
flags to the connection object that your application has dealt with a certain
number of flow controlled bytes, and that the window should be incremented in
some way. Whenever your application has "processed" some received bytes, this
method should be called to signal that they have been processed.
The key difference between this method and :meth:`increment_flow_control_window
<h2.connection.H2Connection.increment_flow_control_window>` is that the method
:meth:`acknowledge_received_data
<h2.connection.H2Connection.acknowledge_received_data>` does not guarantee that
it will emit a ``WINDOW_UPDATE`` frame, and if it does it will not necessarily
emit them for *only* the stream or *only* the frame. Instead, the
``WINDOW_UPDATE`` frames will be *coalesced*: they will be emitted only when
a certain number of bytes have been freed up.
For most applications, this method should be preferred to the manual flow
control mechanism.

Просмотреть файл

@ -1,166 +0,0 @@
Hyper-h2 API
============
This document details the API of Hyper-h2.
Semantic Versioning
-------------------
Hyper-h2 follows semantic versioning for its public API. Please note that the
guarantees of semantic versioning apply only to the API that is *documented
here*. Simply because a method or data field is not prefaced by an underscore
does not make it part of Hyper-h2's public API. Anything not documented here is
subject to change at any time.
Connection
----------
.. autoclass:: h2.connection.H2Connection
:members:
:exclude-members: inbound_flow_control_window
Configuration
-------------
.. autoclass:: h2.config.H2Configuration
:members:
.. _h2-events-api:
Events
------
.. autoclass:: h2.events.RequestReceived
:members:
.. autoclass:: h2.events.ResponseReceived
:members:
.. autoclass:: h2.events.TrailersReceived
:members:
.. autoclass:: h2.events.InformationalResponseReceived
:members:
.. autoclass:: h2.events.DataReceived
:members:
.. autoclass:: h2.events.WindowUpdated
:members:
.. autoclass:: h2.events.RemoteSettingsChanged
:members:
.. autoclass:: h2.events.PingAcknowledged
:members:
.. autoclass:: h2.events.StreamEnded
:members:
.. autoclass:: h2.events.StreamReset
:members:
.. autoclass:: h2.events.PushedStreamReceived
:members:
.. autoclass:: h2.events.SettingsAcknowledged
:members:
.. autoclass:: h2.events.PriorityUpdated
:members:
.. autoclass:: h2.events.ConnectionTerminated
:members:
.. autoclass:: h2.events.AlternativeServiceAvailable
:members:
.. autoclass:: h2.events.UnknownFrameReceived
:members:
Exceptions
----------
.. autoclass:: h2.exceptions.H2Error
:members:
.. autoclass:: h2.exceptions.NoSuchStreamError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.StreamClosedError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.RFC1122Error
:show-inheritance:
:members:
Protocol Errors
~~~~~~~~~~~~~~~
.. autoclass:: h2.exceptions.ProtocolError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.FrameTooLargeError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.FrameDataMissingError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.TooManyStreamsError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.FlowControlError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.StreamIDTooLowError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.InvalidSettingsValueError
:members:
.. autoclass:: h2.exceptions.NoAvailableStreamIDError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.InvalidBodyLengthError
:show-inheritance:
:members:
.. autoclass:: h2.exceptions.UnsupportedFrameError
:members:
.. autoclass:: h2.exceptions.DenialOfServiceError
:show-inheritance:
:members:
HTTP/2 Error Codes
------------------
.. automodule:: h2.errors
:members:
Settings
--------
.. autoclass:: h2.settings.SettingCodes
:members:
.. autoclass:: h2.settings.Settings
:inherited-members:
.. autoclass:: h2.settings.ChangedSetting
:members:

Просмотреть файл

@ -1,17 +0,0 @@
Asyncio Example Server
======================
This example is a basic HTTP/2 server written using `asyncio`_, using some
functionality that was introduced in Python 3.5. This server represents
basically just the same JSON-headers-returning server that was built in the
:doc:`basic-usage` document.
This example demonstrates some basic asyncio techniques.
.. literalinclude:: ../../examples/asyncio/asyncio-server.py
:language: python
:linenos:
:encoding: utf-8
.. _asyncio: https://docs.python.org/3/library/asyncio.html

Просмотреть файл

@ -1,735 +0,0 @@
Getting Started: Writing Your Own HTTP/2 Server
===============================================
This document explains how to get started writing fully-fledged HTTP/2
implementations using Hyper-h2 as the underlying protocol stack. It covers the
basic concepts you need to understand, and talks you through writing a very
simple HTTP/2 server.
This document assumes you're moderately familiar with writing Python, and have
*some* understanding of how computer networks work. If you don't, you'll find
it a lot easier if you get some understanding of those concepts first and then
return to this documentation.
.. _h2-connection-basic:
Connections
-----------
Hyper-h2's core object is the
:class:`H2Connection <h2.connection.H2Connection>` object. This object is an
abstract representation of the state of a single HTTP/2 connection, and holds
all the important protocol state. When using Hyper-h2, this object will be the
first thing you create and the object that does most of the heavy lifting.
The interface to this object is relatively simple. For sending data, you
call the object with methods indicating what actions you want to perform: for
example, you may want to send headers (you'd use the
:meth:`send_headers <h2.connection.H2Connection.send_headers>` method), or
send data (you'd use the
:meth:`send_data <h2.connection.H2Connection.send_data>` method). After you've
decided what actions you want to perform, you get some bytes out of the object
that represent the HTTP/2-encoded representation of your actions, and send them
out over the network however you see fit.
When you receive data from the network, you pass that data in to the
``H2Connection`` object, which returns a list of *events*.
These events, covered in more detail later in :ref:`h2-events-basic`, define
the set of actions the remote peer has performed on the connection, as
represented by the HTTP/2-encoded data you just passed to the object.
Thus, you end up with a simple loop (which you may recognise as a more-specific
form of an `event loop`_):
1. First, you perform some actions.
2. You send the data created by performing those actions to the network.
3. You read data from the network.
4. You decode those into events.
5. The events cause you to trigger some actions: go back to step 1.
Of course, HTTP/2 is more complex than that, but in the very simplest case you
can write a fairly effective HTTP/2 tool using just that kind of loop. Later in
this document, we'll do just that.
Some important subtleties of ``H2Connection`` objects are covered in
:doc:`advanced-usage`: see :ref:`h2-connection-advanced` for more information.
However, one subtlety should be covered, and that is this: Hyper-h2's
``H2Connection`` object doesn't do I/O. Let's talk briefly about why.
I/O
~~~
Any useful HTTP/2 tool eventually needs to do I/O. This is because it's not
very useful to be able to speak to other computers using a protocol like HTTP/2
unless you actually *speak* to them sometimes.
However, doing I/O is not a trivial thing: there are lots of different ways to
do it, and once you choose a way to do it your code usually won't work well
with the approaches you *didn't* choose.
While there are lots of different ways to do I/O, when it comes down to it
all HTTP/2 implementations transform bytes received into events, and events
into bytes to send. So there's no reason to have lots of different versions of
this core protocol code: one for Twisted, one for gevent, one for threading,
and one for synchronous code.
This is why we said at the top that Hyper-h2 is a *HTTP/2 Protocol Stack*, not
a *fully-fledged implementation*. Hyper-h2 knows how to transform bytes into
events and back, but that's it. The I/O and smarts might be different, but
the core HTTP/2 logic is the same: that's what Hyper-h2 provides.
Not doing I/O makes Hyper-h2 general, and also relatively simple. It has an
easy-to-understand performance envelope, it's easy to test (and as a result
easy to get correct behaviour out of), and it behaves in a reproducible way.
These are all great traits to have in a library that is doing something quite
complex.
This document will talk you through how to build a relatively simple HTTP/2
implementation using Hyper-h2, to give you an understanding of where it fits in
your software.
.. _h2-events-basic:
Events
------
When writing a HTTP/2 implementation it's important to know what the remote
peer is doing: if you didn't care, writing networked programs would be a lot
easier!
Hyper-h2 encodes the actions of the remote peer in the form of *events*. When
you receive data from the remote peer and pass it into your ``H2Connection``
object (see :ref:`h2-connection-basic`), the ``H2Connection`` returns a list
of objects, each one representing a single event that has occurred. Each
event refers to a single action the remote peer has taken.
Some events are fairly high-level, referring to things that are more general
than HTTP/2: for example, the
:class:`RequestReceived <h2.events.RequestReceived>` event is a general HTTP
concept, not just a HTTP/2 one. Other events are extremely HTTP/2-specific:
for example, :class:`PushedStreamReceived <h2.events.PushedStreamReceived>`
refers to Server Push, a very HTTP/2-specific concept.
The reason these events exist is that Hyper-h2 is intended to be very general.
This means that, in many cases, Hyper-h2 does not know exactly what to do in
response to an event. Your code will need to handle these events, and make
decisions about what to do. That's the major role of any HTTP/2 implementation
built on top of Hyper-h2.
A full list of events is available in :ref:`h2-events-api`. For the purposes
of this example, we will handle only a small set of events.
Writing Your Server
-------------------
Armed with the knowledge you just obtained, we're going to write a very simple
HTTP/2 web server. The goal of this server is to write a server that can handle
a HTTP GET, and that returns the headers sent by the client, encoded in JSON.
Basically, something a lot like `httpbin.org/get`_. Nothing fancy, but this is
a good way to get a handle on how you should interact with Hyper-h2.
For the sake of simplicity, we're going to write this using the Python standard
library, in Python 3. In reality, you'll probably want to use an asynchronous
framework of some kind: see the `examples directory`_ in the repository for
some examples of how you'd do that.
Before we start, create a new file called ``h2server.py``: we'll use that as
our workspace. Additionally, you should install Hyper-h2: follow the
instructions in :doc:`installation`.
Step 1: Sockets
~~~~~~~~~~~~~~~
To begin with, we need to make sure we can listen for incoming data and send it
back. To do that, we need to use the `standard library's socket module`_. For
now we're going to skip doing TLS: if you want to reach your server from your
web browser, though, you'll need to add TLS and some other functionality.
Consider looking at our examples in our `examples directory`_ instead.
Let's begin. First, open up ``h2server.py``. We need to import the socket
module and start listening for connections.
This is not a socket tutorial, so we're not going to dive too deeply into how
this works. If you want more detail about sockets, there are lots of good
tutorials on the web that you should investigate.
When you want to listen for incoming connections, you need to *bind* an
address first. So let's do that. Try setting up your file to look like this:
.. code-block:: python
import socket
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 8080))
sock.listen(5)
while True:
print(sock.accept())
In a shell window, execute this program (``python h2server.py``). Then, open
another shell and run ``curl http://localhost:8080/``. In the first shell, you
should see something like this:
.. code-block:: console
$ python h2server.py
(<socket.socket fd=4, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 8080), raddr=('127.0.0.1', 58800)>, ('127.0.0.1', 58800))
Run that ``curl`` command a few more times. You should see a few more similar
lines appear. Note that the ``curl`` command itself will exit with an error.
That's fine: it happens because we didn't send any data.
Now go ahead and stop the server running by hitting Ctrl+C in the first shell.
You should see a ``KeyboardInterrupt`` error take the process down.
What's the program above doing? Well, first it creates a
:func:`socket <python:socket.socket>` object. This socket is then *bound* to
a specific address: ``('0.0.0.0', 8080)``. This is a special address: it means
that this socket should be listening for any traffic to TCP port 8080. Don't
worry about the call to ``setsockopt``: it just makes sure you can run this
program repeatedly.
We then loop forever calling the :meth:`accept <python:socket.socket.accept>`
method on the socket. The accept method blocks until someone attempts to
connect to our TCP port: when they do, it returns a tuple: the first element is
a new socket object, the second element is a tuple of the address the new
connection is from. You can see this in the output from our ``h2server.py``
script.
At this point, we have a script that can accept inbound connections. This is a
good start! Let's start getting HTTP/2 involved.
Step 2: Add a H2Connection
~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that we can listen for socket information, we want to prepare our HTTP/2
connection object and start handing it data. For now, let's just see what
happens as we feed it data.
To make HTTP/2 connections, we need a tool that knows how to speak HTTP/2.
Most versions of curl in the wild don't, so let's install a Python tool. In
your Python environment, run ``pip install hyper``. This will install a Python
command-line HTTP/2 tool called ``hyper``. To confirm that it works, try
running this command and verifying that the output looks similar to the one
shown below:
.. code-block:: console
$ hyper GET http://http2bin.org/get
{'args': {},
'headers': {'Connection': 'keep-alive',
'Host': 'http2bin.org',
'Via': '2 http2bin.org'},
'origin': '10.0.0.2',
'url': 'http://http2bin.org/get'}
Assuming it works, you're now ready to start sending HTTP/2 data.
Back in our ``h2server.py`` script, we're going to want to start handling data.
Let's add a function that takes a socket returned from ``accept``, and reads
data from it. Let's call that function ``handle``. That function should create
a :class:`H2Connection <h2.connection.H2Connection>` object and then loop on
the socket, reading data and passing it to the connection.
To read data from a socket we need to call ``recv``. The ``recv`` function
takes a number as its argument, which is the *maximum* amount of data to be
returned from a single call (note that ``recv`` will return as soon as any data
is available, even if that amount is vastly less than the number you passed to
it). For the purposes of writing this kind of software the specific value is
not enormously useful, but should not be overly large. For that reason, when
you're unsure, a number like 4096 or 65535 is a good bet. We'll use 65535 for
this example.
The function should look something like this:
.. code-block:: python
import h2.connection
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
while True:
data = sock.recv(65535)
print(conn.receive_data(data))
Let's update our main loop so that it passes data on to our new data handling
function. Your ``h2server.py`` should end up looking a like this:
.. code-block:: python
import socket
import h2.connection
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
while True:
data = sock.recv(65535)
if not data:
break
print(conn.receive_data(data))
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 8080))
sock.listen(5)
while True:
handle(sock.accept()[0])
Running that in one shell, in your other shell you can run
``hyper --h2 GET http://localhost:8080/``. That shell should hang, and you
should then see the following output from your ``h2server.py`` shell:
.. code-block:: console
$ python h2server.py
[<h2.events.RemoteSettingsChanged object at 0x10c4ee390>]
You'll then need to kill ``hyper`` and ``h2server.py`` with Ctrl+C. Feel free
to do this a few times, to see how things behave.
So, what did we see here? When the connection was opened, we used the
:meth:`recv <python:socket.socket.recv>` method to read some data from the
socket, in a loop. We then passed that data to the connection object, which
returned us a single event object:
:class:`RemoteSettingsChanged <h2.events.RemoteSettingsChanged>`.
But what we didn't see was anything else. So it seems like all ``hyper`` did
was change its settings, but nothing else. If you look at the other ``hyper``
window, you'll notice that it hangs for a while and then eventually fails with
a socket timeout. It was waiting for something: what?
Well, it turns out that at the start of a connection, both sides need to send
a bit of data, called "the HTTP/2 preamble". We don't need to get into too much
detail here, but basically both sides need to send a single block of HTTP/2
data that tells the other side what their settings are. ``hyper`` did that,
but we didn't.
Let's do that next.
Step 3: Sending the Preamble
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hyper-h2 makes doing connection setup really easy. All you need to do is call
the
:meth:`initiate_connection <h2.connection.H2Connection.initiate_connection>`
method, and then send the corresponding data. Let's update our ``handle``
function to do just that:
.. code-block:: python
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
conn.initiate_connection()
sock.sendall(conn.data_to_send())
while True:
data = sock.recv(65535)
print(conn.receive_data(data))
The big change here is the call to ``initiate_connection``, but there's another
new method in there:
:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
When you make function calls on your ``H2Connection`` object, these will often
want to cause HTTP/2 data to be written out to the network. But Hyper-h2
doesn't do any I/O, so it can't do that itself. Instead, it writes it to an
internal buffer. You can retrieve data from this buffer using the
``data_to_send`` method. There are some subtleties about that method, but we
don't need to worry about them right now: all we need to do is make sure we're
sending whatever data is outstanding.
Your ``h2server.py`` script should now look like this:
.. code-block:: python
import socket
import h2.connection
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
conn.initiate_connection()
sock.sendall(conn.data_to_send())
while True:
data = sock.recv(65535)
if not data:
break
print(conn.receive_data(data))
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 8080))
sock.listen(5)
while True:
handle(sock.accept()[0])
With this change made, rerun your ``h2server.py`` script and hit it with the
same ``hyper`` command: ``hyper --h2 GET http://localhost:8080/``. The
``hyper`` command still hangs, but this time we get a bit more output from our
``h2server.py`` script:
.. code-block:: console
$ python h2server.py
[<h2.events.RemoteSettingsChanged object at 0x10292d390>]
[<h2.events.SettingsAcknowledged object at 0x102b3a160>]
[<h2.events.RequestReceived object at 0x102b3a3c8>, <h2.events.StreamEnded object at 0x102b3a400>]
So, what's happening?
The first thing to note is that we're going around our loop more than once now.
First, we receive some data that triggers a
:class:`RemoteSettingsChanged <h2.events.RemoteSettingsChanged>` event.
Then, we get some more data that triggers a
:class:`SettingsAcknowledged <h2.events.SettingsAcknowledged>` event.
Finally, even more data that triggers *two* events:
:class:`RequestReceived <h2.events.RequestReceived>` and
:class:`StreamEnded <h2.events.StreamEnded>`.
So, what's happening is that ``hyper`` is telling us about its settings,
acknowledging ours, and then sending us a request. Then it ends a *stream*,
which is a HTTP/2 communications channel that holds a request and response
pair.
A stream isn't done until it's either *reset* or both sides *close* it:
in this sense it's bi-directional. So what the ``StreamEnded`` event tells us
is that ``hyper`` is closing its half of the stream: it won't send us any more
data on that stream. That means the request is done.
So why is ``hyper`` hanging? Well, we haven't sent a response yet: let's do
that.
Step 4: Handling Events
~~~~~~~~~~~~~~~~~~~~~~~
What we want to do is send a response when we receive a request. Happily, we
get an event when we receive a request, so we can use that to be our signal.
Let's define a new function that sends a response. For now, this response can
just be a little bit of data that prints "it works!".
The function should take the ``H2Connection`` object, and the event that
signaled the request. Let's define it.
.. code-block:: python
def send_response(conn, event):
stream_id = event.stream_id
conn.send_headers(
stream_id=stream_id,
headers=[
(':status', '200'),
('server', 'basic-h2-server/1.0')
],
)
conn.send_data(
stream_id=stream_id,
data=b'it works!',
end_stream=True
)
So while this is only a short function, there's quite a lot going on here we
need to unpack. Firstly, what's a stream ID? Earlier we discussed streams
briefly, to say that they're a bi-directional communications channel that holds
a request and response pair. Part of what makes HTTP/2 great is that there can
be lots of streams going on at once, sending and receiving different requests
and responses. To identify each stream, we use a *stream ID*. These are unique
across the lifetime of a connection, and they go in ascending order.
Most ``H2Connection`` functions take a stream ID: they require you to actively
tell the connection which one to use. In this case, as a simple server, we will
never need to choose a stream ID ourselves: the client will always choose one
for us. That means we'll always be able to get the one we need off the events
that fire.
Next, we send some *headers*. In HTTP/2, a response is made up of some set of
headers, and optionally some data. The headers have to come first: if you're a
client then you'll be sending *request* headers, but in our case these headers
are our *response* headers.
Mostly these aren't very exciting, but you'll notice one special header in
there: ``:status``. This is a HTTP/2-specific header, and it's used to hold the
HTTP status code that used to go at the top of a HTTP response. Here, we're
saying the response is ``200 OK``, which is successful.
To send headers in Hyper-h2, you use the
:meth:`send_headers <h2.connection.H2Connection.send_headers>` function.
Next, we want to send the body data. To do that, we use the
:meth:`send_data <h2.connection.H2Connection.send_data>` function. This also
takes a stream ID. Note that the data is binary: Hyper-h2 does not work with
unicode strings, so you *must* pass bytestrings to the ``H2Connection``. The
one exception is headers: Hyper-h2 will automatically encode those into UTF-8.
The last thing to note is that on our call to ``send_data``, we set
``end_stream`` to ``True``. This tells Hyper-h2 (and the remote peer) that
we're done with sending data: the response is over. Because we know that
``hyper`` will have ended its side of the stream, when we end ours the stream
will be totally done with.
We're nearly ready to go with this: we just need to plumb this function in.
Let's amend our ``handle`` function again:
.. code-block:: python
import h2.events
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
conn.initiate_connection()
sock.sendall(conn.data_to_send())
while True:
data = sock.recv(65535)
if not data:
break
events = conn.receive_data(data)
for event in events:
if isinstance(event, h2.events.RequestReceived):
send_response(conn, event)
data_to_send = conn.data_to_send()
if data_to_send:
sock.sendall(data_to_send)
The changes here are all at the end. Now, when we receive some events, we
look through them for the ``RequestReceived`` event. If we find it, we make
sure we send a response.
Then, at the bottom of the loop we check whether we have any data to send, and
if we do, we send it. Then, we repeat again.
With these changes, your ``h2server.py`` file should look like this:
.. code-block:: python
import socket
import h2.connection
import h2.events
def send_response(conn, event):
stream_id = event.stream_id
conn.send_headers(
stream_id=stream_id,
headers=[
(':status', '200'),
('server', 'basic-h2-server/1.0')
],
)
conn.send_data(
stream_id=stream_id,
data=b'it works!',
end_stream=True
)
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
conn.initiate_connection()
sock.sendall(conn.data_to_send())
while True:
data = sock.recv(65535)
if not data:
break
events = conn.receive_data(data)
for event in events:
if isinstance(event, h2.events.RequestReceived):
send_response(conn, event)
data_to_send = conn.data_to_send()
if data_to_send:
sock.sendall(data_to_send)
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 8080))
sock.listen(5)
while True:
handle(sock.accept()[0])
Alright. Let's run this, and then run our ``hyper`` command again.
This time, nothing is printed from our server, and the ``hyper`` side prints
``it works!``. Success! Try running it a few more times, and we can see that
not only does it work the first time, it works the other times too!
We can speak HTTP/2! Let's add the final step: returning the JSON-encoded
request headers.
Step 5: Returning Headers
~~~~~~~~~~~~~~~~~~~~~~~~~
If we want to return the request headers in JSON, the first thing we have to do
is find them. Handily, if you check the documentation for
:class:`RequestReceived <h2.events.RequestReceived>` you'll find that this
event carries, in addition to the stream ID, the request headers.
This means we can make a really simple change to our ``send_response``
function to take those headers and encode them as a JSON object. Let's do that:
.. code-block:: python
import json
def send_response(conn, event):
stream_id = event.stream_id
response_data = json.dumps(dict(event.headers)).encode('utf-8')
conn.send_headers(
stream_id=stream_id,
headers=[
(':status', '200'),
('server', 'basic-h2-server/1.0'),
('content-length', str(len(response_data))),
('content-type', 'application/json'),
],
)
conn.send_data(
stream_id=stream_id,
data=response_data,
end_stream=True
)
This is a really simple change, but it's all we need to do: a few extra headers
and the JSON dump, but that's it.
Step 6: Bringing It All Together
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This should be all we need!
Let's take all the work we just did and throw that into our ``h2server.py``
file, which should now look like this:
.. code-block:: python
import json
import socket
import h2.connection
import h2.events
def send_response(conn, event):
stream_id = event.stream_id
response_data = json.dumps(dict(event.headers)).encode('utf-8')
conn.send_headers(
stream_id=stream_id,
headers=[
(':status', '200'),
('server', 'basic-h2-server/1.0'),
('content-length', str(len(response_data))),
('content-type', 'application/json'),
],
)
conn.send_data(
stream_id=stream_id,
data=response_data,
end_stream=True
)
def handle(sock):
conn = h2.connection.H2Connection(client_side=False)
conn.initiate_connection()
sock.sendall(conn.data_to_send())
while True:
data = sock.recv(65535)
if not data:
break
events = conn.receive_data(data)
for event in events:
if isinstance(event, h2.events.RequestReceived):
send_response(conn, event)
data_to_send = conn.data_to_send()
if data_to_send:
sock.sendall(data_to_send)
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 8080))
sock.listen(5)
while True:
handle(sock.accept()[0])
Now, execute ``h2server.py`` and then point ``hyper`` at it again. You should
see something like the following output from ``hyper``:
.. code-block:: console
$ hyper --h2 GET http://localhost:8080/
{":scheme": "http", ":authority": "localhost", ":method": "GET", ":path": "/"}
Here you can see the HTTP/2 request 'special headers' that ``hyper`` sends.
These are similar to the ``:status`` header we have to send on our response:
they encode important parts of the HTTP request in a clearly-defined way. If
you were writing a client stack using Hyper-h2, you'd need to make sure you
were sending those headers.
Congratulations!
~~~~~~~~~~~~~~~~
Congratulations! You've written your first HTTP/2 server! If you want to extend
it, there are a few directions you could investigate:
- We didn't handle a few events that we saw were being raised: you could add
some methods to handle those appropriately.
- Right now our server is single threaded, so it can only handle one client at
a time. Consider rewriting this server to use threads, or writing this
server again using your favourite asynchronous programming framework.
If you plan to use threads, you should know that a ``H2Connection`` object is
deliberately not thread-safe. As a possible design pattern, consider creating
threads and passing the sockets returned by ``accept`` to those threads, and
then letting those threads create their own ``H2Connection`` objects.
- Take a look at some of our long-form code examples in :doc:`examples`.
- Alternatively, try playing around with our examples in our repository's
`examples directory`_. These examples are a bit more fully-featured, and can
be reached from your web browser. Try adjusting what they do, or adding new
features to them!
- You may want to make this server reachable from your web browser. To do that,
you'll need to add proper TLS support to your server. This can be tricky, and
in many cases requires `PyOpenSSL`_ in addition to the other libraries you
have installed. Check the `Eventlet example`_ to see what PyOpenSSL code is
required to TLS-ify your server.
.. _event loop: https://en.wikipedia.org/wiki/Event_loop
.. _httpbin.org/get: https://httpbin.org/get
.. _examples directory: https://github.com/python-hyper/hyper-h2/tree/master/examples
.. _standard library's socket module: https://docs.python.org/3.5/library/socket.html
.. _Application Layer Protocol Negotiation: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
.. _get your certificate here: https://raw.githubusercontent.com/python-hyper/hyper-h2/master/examples/twisted/server.crt
.. _get your private key here: https://raw.githubusercontent.com/python-hyper/hyper-h2/master/examples/twisted/server.key
.. _PyOpenSSL: http://pyopenssl.readthedocs.org/
.. _Eventlet example: https://github.com/python-hyper/hyper-h2/blob/master/examples/eventlet/eventlet-server.py

Просмотреть файл

@ -1,270 +0,0 @@
# -*- coding: utf-8 -*-
#
# hyper-h2 documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 17 10:06:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# '../..' puts the repository root on sys.path so autodoc can import the
# `h2` package without installing it first.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc: pull API reference from docstrings.
# intersphinx: cross-link to external projects (see intersphinx_mapping below).
# viewcode: add "[source]" links next to documented objects.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hyper-h2'
copyright = u'2015, Cory Benfield'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): presumably these mirror the h2 package version declared in
# setup.py / h2/__init__.py — confirm they are bumped together at release time.
#
# The short X.Y version.
version = '3.0.1'
# The full version, including alpha/beta/rc tags.
release = '3.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hyper-h2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'hyper-h2.tex', u'hyper-h2 Documentation',
     u'Cory Benfield', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'hyper-h2', u'hyper-h2 Documentation',
     [u'Cory Benfield'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'hyper-h2', u'hyper-h2 Documentation',
     u'Cory Benfield', 'hyper-h2', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# These inventories back the :mod:/:class: cross-references used throughout
# the docs (e.g. links into the stdlib `ssl` module and pyOpenSSL).
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.5/', None),
    'hpack': ('https://python-hyper.org/hpack/en/stable/', None),
    'pyopenssl': ('https://pyopenssl.readthedocs.org/en/latest/', None),
}

Просмотреть файл

@ -1,4 +0,0 @@
Contributors
============
.. include:: ../../CONTRIBUTORS.rst

Просмотреть файл

@ -1,17 +0,0 @@
Curio Example Server
====================
This example is a basic HTTP/2 server written using `curio`_, David Beazley's
example of how to build a concurrent networking framework using Python 3.5's
new ``async``/``await`` syntax.
This example is notable for demonstrating the correct use of HTTP/2 flow
control with Hyper-h2. It is also a good example of the brand new syntax.
.. literalinclude:: ../../examples/curio/curio-server.py
:language: python
:linenos:
:encoding: utf-8
.. _curio: https://curio.readthedocs.org/en/latest/

Просмотреть файл

@ -1,19 +0,0 @@
Eventlet Example Server
=======================
This example is a basic HTTP/2 server written using the `eventlet`_ concurrent
networking framework. This example is notable for demonstrating how to
configure `PyOpenSSL`_, which `eventlet`_ uses for its TLS layer.
In terms of HTTP/2 functionality, this example is very simple: it returns the
request headers as a JSON document to the caller. It does not obey HTTP/2 flow
control, which is a flaw, but it is otherwise functional.
.. literalinclude:: ../../examples/eventlet/eventlet-server.py
:language: python
:linenos:
:encoding: utf-8
.. _eventlet: http://eventlet.net/
.. _PyOpenSSL: https://pyopenssl.readthedocs.org/en/stable/

Просмотреть файл

@ -1,28 +0,0 @@
Code Examples
=============
This section of the documentation contains long-form code examples. These are
intended as references for developers that would like to get an understanding
of how Hyper-h2 fits in with various Python I/O frameworks.
Example Servers
---------------
.. toctree::
:maxdepth: 2
asyncio-example
twisted-example
eventlet-example
curio-example
tornado-example
wsgi-example
Example Clients
---------------
.. toctree::
:maxdepth: 2
twisted-head-example
twisted-post-example

Просмотреть файл

@ -1,41 +0,0 @@
.. hyper-h2 documentation master file, created by
sphinx-quickstart on Thu Sep 17 10:06:02 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Hyper-h2: A pure-Python HTTP/2 protocol stack
=============================================
Hyper-h2 is a HTTP/2 protocol stack, written entirely in Python. The goal of
Hyper-h2 is to be a common HTTP/2 stack for the Python ecosystem,
usable in all programs regardless of concurrency model or environment.
To achieve this, Hyper-h2 is entirely self-contained: it does no I/O of any
kind, leaving that up to a wrapper library to control. This ensures that it can
seamlessly work in all kinds of environments, from single-threaded code to
Twisted.
Its goal is to be 100% compatible with RFC 7540, implementing a complete HTTP/2
protocol stack built on a set of finite state machines. Its secondary goals are
to be fast, clear, and efficient.
For usage examples, see :doc:`basic-usage` or consult the examples in the
repository.
Contents
--------
.. toctree::
:maxdepth: 2
installation
basic-usage
negotiating-http2
examples
advanced-usage
low-level
api
testimonials
release-process
release-notes
contributors

Просмотреть файл

@ -1,18 +0,0 @@
Installation
============
Hyper-h2 is a pure-python project. This means installing it is extremely
simple. To get the latest release from PyPI, simply run:
.. code-block:: console
$ pip install h2
Alternatively, feel free to download one of the release tarballs from
`our GitHub page`_, extract it to your favourite directory, and then run
.. code-block:: console
$ python setup.py install
.. _our GitHub page: https://github.com/python-hyper/hyper-h2

Просмотреть файл

@ -1,159 +0,0 @@
Low-Level Details
=================
.. warning:: This section of the documentation covers low-level implementation
details of hyper-h2. This is most likely to be of use to hyper-h2
developers and to other HTTP/2 implementers, though it could well
be of general interest. Feel free to peruse it, but if you're
looking for information about how to *use* hyper-h2 you should
consider looking elsewhere.
State Machines
--------------
hyper-h2 is fundamentally built on top of a pair of interacting Finite State
Machines. One of these FSMs manages per-connection state, and another manages
per-stream state. Almost without exception (see :ref:`priority` for more
details) every single frame is unconditionally translated into events for
both state machines and those state machines are turned.
The advantages of a system such as this is that the finite state machines can
very densely encode the kinds of things that are allowed at any particular
moment in a HTTP/2 connection. However, most importantly, almost all protocols
are defined *in terms* of finite state machines: that is, protocol descriptions
can be reduced to a number of states and inputs. That makes FSMs a very natural
tool for implementing protocol stacks.
Indeed, most protocol implementations that do not explicitly encode a finite
state machine almost always *implicitly* encode a finite state machine, by
using classes with a bunch of variables that amount to state-tracking
variables, or by using the call-stack as an implicit state tracking mechanism.
While these methods are not immediately problematic, they tend to lack
*explicitness*, and can lead to subtle bugs of the form "protocol action X is
incorrectly allowed in state Y".
For these reasons, we have implemented two *explicit* finite state machines.
These machines aim to encode most of the protocol-specific state, in particular
regarding what frame is allowed at what time. This target goal is sometimes not
achieved: in particular, as of this writing the *stream* FSM contains a number
of other state variables that really ought to be rolled into the state machine
itself in the form of new states, or in the form of a transformation of the
FSM to use state *vectors* instead of state *scalars*.
The following sections contain some implementers notes on these FSMs.
Connection State Machine
~~~~~~~~~~~~~~~~~~~~~~~~
The "outer" state machine, the first one that is encountered when sending or
receiving data, is the connection state machine. This state machine tracks
whole-connection state.
This state machine is primarily intended to forbid certain actions on the basis
of whether the implementation is acting as a client or a server. For example,
clients are not permitted to send ``PUSH_PROMISE`` frames: this state machine
forbids that by refusing to define a valid transition from the ``CLIENT_OPEN``
state for the ``SEND_PUSH_PROMISE`` event.
Otherwise, this particular state machine triggers no side-effects. It has a
very coarse, high-level, functionality.
A visual representation of this FSM is shown below:
.. image:: _static/h2.connection.H2ConnectionStateMachine.dot.png
:alt: A visual representation of the connection FSM.
:target: _static/h2.connection.H2ConnectionStateMachine.dot.png
.. _stream-state-machine:
Stream State Machine
~~~~~~~~~~~~~~~~~~~~
Once the connection state machine has been spun, any frame that belongs to a
stream is passed to the stream state machine for its given stream. Each stream
has its own instance of the state machine, but all of them share the transition
table: this is because the table itself is sufficiently large that having it be
per-instance would be a ridiculous memory overhead.
Unlike the connection state machine, the stream state machine is quite complex.
This is because it frequently needs to encode some side-effects. The most
common side-effect is emitting a ``RST_STREAM`` frame when an error is
encountered: the need to do this means that far more transitions need to be
encoded than for the connection state machine.
Many of the side-effect functions in this state machine also raise
:class:`ProtocolError <h2.exceptions.ProtocolError>` exceptions. This is almost
always done on the basis of an extra state variable, which is an annoying code
smell: it should always be possible for the state machine itself to police
these using explicit state management. A future refactor will hopefully address
this problem by making these additional state variables part of the state
definitions in the FSM, which will lead to an expansion of the number of states
but a greater degree of simplicity in understanding and tracking what is going
on in the state machine.
The other action taken by the side-effect functions defined here is returning
:ref:`events <h2-events-basic>`. Most of these events are returned directly to
the user, and reflect the specific state transition that has taken place, but
some of the events are purely *internal*: they are used to signal to other
parts of the hyper-h2 codebase what action has been taken.
The major use of the internal events functionality at this time is for
validating header blocks: there are different rules for request headers than
there are for response headers, and different rules again for trailers. The
internal events are used to determine *exactly what* kind of data the user is
attempting to send, and using that information to do the correct kind of
validation. This approach ensures that the final source of truth about what's
happening at the protocol level lives inside the FSM, which is an extremely
important design principle we want to continue to enshrine in hyper-h2.
A visual representation of this FSM is shown below:
.. image:: _static/h2.stream.H2StreamStateMachine.dot.png
:alt: A visual representation of the stream FSM.
:target: _static/h2.stream.H2StreamStateMachine.dot.png
.. _priority:
Priority
~~~~~~~~
In the :ref:`stream-state-machine` section we said that any frame that belongs
to a stream is passed to the stream state machine. This turns out to be not
quite true.
Specifically, while ``PRIORITY`` frames are technically sent on a given stream
(that is, `RFC 7540 Section 6.3`_ defines them as "always identifying a stream"
and forbids the use of stream ID ``0`` for them), in practice they are almost
completely exempt from the usual stream FSM behaviour. Specifically, the RFC
has this to say:
The ``PRIORITY`` frame can be sent on a stream in any state, though it
cannot be sent between consecutive frames that comprise a single
header block (Section 4.3).
Given that the consecutive header block requirement is handled outside of the
FSMs, this section of the RFC essentially means that there is *never* a
situation where it is invalid to receive a ``PRIORITY`` frame. This means that
including it in the stream FSM would require that we allow ``SEND_PRIORITY``
and ``RECV_PRIORITY`` in all states.
This is not a totally onerous task: however, another key note is that hyper-h2
uses the *absence* of a stream state machine to flag a closed stream. This is
primarily for memory conservation reasons: if we needed to keep around an FSM
for every stream we've ever seen, that would cause long-lived HTTP/2
connections to consume increasingly large amounts of memory. On top of this,
it would require us to create a stream FSM each time we received a ``PRIORITY``
frame for a given stream, giving a malicious peer an easy route to force a
hyper-h2 user to allocate nearly unbounded amounts of memory.
For this reason, hyper-h2 circumvents the stream FSM entirely for ``PRIORITY``
frames. Instead, these frames are treated as being connection-level frames that
*just happen* to identify a specific stream. They do not bring streams into
being, or in any sense interact with hyper-h2's view of streams. Their stream
details are treated as strictly metadata that hyper-h2 is not interested in
beyond being able to parse it out.
.. _RFC 7540 Section 6.3: https://tools.ietf.org/html/rfc7540#section-6.3

Просмотреть файл

@ -1,100 +0,0 @@
Negotiating HTTP/2
==================
`RFC 7540`_ specifies three methods of negotiating HTTP/2 connections. This document outlines how to use Hyper-h2 with each one.
.. _starting-alpn:
HTTPS URLs (ALPN and NPN)
-------------------------
Starting HTTP/2 for HTTPS URLs is outlined in `RFC 7540 Section 3.3`_. In this case, the client and server use a TLS extension to negotiate HTTP/2: typically either or both of `NPN`_ or `ALPN`_. How to use NPN and ALPN is currently not covered in this document: please consult the documentation for either the :mod:`ssl module <python:ssl>` in the standard library, or the :mod:`PyOpenSSL <pyopenssl:OpenSSL.SSL>` third-party modules, for more on this topic.
This method is the simplest to use once the TLS connection is established. To use it with Hyper-h2, after you've established the connection and confirmed that HTTP/2 has been negotiated with `ALPN`_, create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_connection <h2.connection.H2Connection.initiate_connection>`. This will ensure that the appropriate preamble data is placed in the data buffer. You should then immediately send the data returned by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>` on your TLS connection.
At this point, you're free to use all the HTTP/2 functionality provided by Hyper-h2.
Server Setup Example
~~~~~~~~~~~~~~~~~~~~
This example uses the APIs as defined in Python 3.5. If you are using an older version of Python you may not have access to the APIs used here. As noted above, please consult the documentation for the :mod:`ssl module <python:ssl>` to confirm.
.. literalinclude:: ../../examples/fragments/server_https_setup_fragment.py
:language: python
:linenos:
:encoding: utf-8
Client Setup Example
~~~~~~~~~~~~~~~~~~~~
The client example is very similar to the server example above. The :class:`SSLContext <python:ssl.SSLContext>` object requires some minor changes, as does the :class:`H2Connection <h2.connection.H2Connection>`, but the bulk of the code is the same.
.. literalinclude:: ../../examples/fragments/client_https_setup_fragment.py
:language: python
:linenos:
:encoding: utf-8
.. _starting-upgrade:
HTTP URLs (Upgrade)
-------------------
Starting HTTP/2 for HTTP URLs is outlined in `RFC 7540 Section 3.2`_. In this case, the client and server use the HTTP Upgrade mechanism originally described in `RFC 7230 Section 6.7`_. The client sends its initial HTTP/1.1 request with two extra headers. The first is ``Upgrade: h2c``, which requests upgrade to cleartext HTTP/2. The second is a ``HTTP2-Settings`` header, which contains a specially formatted string that encodes a HTTP/2 Settings frame.
To do this with Hyper-h2 you have two slightly different flows: one for clients, one for servers.
Clients
~~~~~~~
For a client, when sending the first request you should manually add your ``Upgrade`` header. You should then create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_upgrade_connection <h2.connection.H2Connection.initiate_upgrade_connection>` with no arguments. This method will return a bytestring to use as the value of your ``HTTP2-Settings`` header.
If the server returns a ``101`` status code, it has accepted the upgrade, and you should immediately send the data returned by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>`. Now you should consume the entire ``101`` header block. All data after the ``101`` header block is HTTP/2 data that should be fed directly to :meth:`H2Connection.receive_data <h2.connection.H2Connection.receive_data>` and handled as normal with Hyper-h2.
If the server does not return a ``101`` status code then it is not upgrading. Continue with HTTP/1.1 as normal: you may throw away your :class:`H2Connection <h2.connection.H2Connection>` object, as it is of no further use.
The server will respond to your original request in HTTP/2. Please pay attention to the events received from Hyper-h2, as they will define the server's response.
Client Example
^^^^^^^^^^^^^^
The code below demonstrates how to handle a plaintext upgrade from the perspective of the client. For the purposes of keeping the example code as simple and generic as possible it uses the synchronous socket API that comes with the Python standard library: if you want to use asynchronous I/O, you will need to translate this code to the appropriate idiom.
.. literalinclude:: ../../examples/fragments/client_upgrade_fragment.py
:language: python
:linenos:
:encoding: utf-8
Servers
~~~~~~~
If the first request you receive on a connection from the client contains an ``Upgrade`` header with the ``h2c`` token in it, and you're willing to upgrade, you should create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_upgrade_connection <h2.connection.H2Connection.initiate_upgrade_connection>` with the value of the ``HTTP2-Settings`` header (as a bytestring) as the only argument.
Then, you should send back a ``101`` response that contains ``h2c`` in the ``Upgrade`` header. That response will inform the client that you're switching to HTTP/2. Then, you should immediately send the data that is returned to you by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>` on the connection: this is a necessary part of the HTTP/2 upgrade process.
At this point, you may now respond to the original HTTP/1.1 request in HTTP/2 by calling the appropriate methods on the :class:`H2Connection <h2.connection.H2Connection>` object. No further HTTP/1.1 may be sent on this connection: from this point onward, all data sent by you and the client will be HTTP/2 data.
Server Example
^^^^^^^^^^^^^^
The code below demonstrates how to handle a plaintext upgrade from the perspective of the server. For the purposes of keeping the example code as simple and generic as possible it uses the synchronous socket API that comes with the Python standard library: if you want to use asynchronous I/O, you will need to translate this code to the appropriate idiom.
.. literalinclude:: ../../examples/fragments/server_upgrade_fragment.py
:language: python
:linenos:
:encoding: utf-8
Prior Knowledge
---------------
It's possible that you as a client know that a particular server supports HTTP/2, and that you do not need to perform any of the negotiations described above. In that case, you may follow the steps in :ref:`starting-alpn`, ignoring all references to ALPN and NPN: there's no need to perform the upgrade dance described in :ref:`starting-upgrade`.
.. _RFC 7540: https://tools.ietf.org/html/rfc7540
.. _RFC 7540 Section 3.2: https://tools.ietf.org/html/rfc7540#section-3.2
.. _RFC 7540 Section 3.3: https://tools.ietf.org/html/rfc7540#section-3.3
.. _NPN: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
.. _ALPN: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
.. _RFC 7230 Section 6.7: https://tools.ietf.org/html/rfc7230#section-6.7

Просмотреть файл

@ -1,101 +0,0 @@
Release Notes
=============
This document contains release notes for Hyper-h2. In addition to the
:ref:`detailed-release-notes` found at the bottom of this document, this
document also includes a high-level prose overview of each major release after
1.0.0.
High Level Notes
----------------
3.0.0: 24 March 2017
~~~~~~~~~~~~~~~~~~~~
The Hyper-h2 team and the Hyper project are delighted to announce the release
of Hyper-h2 version 3.0.0! Unlike the really notable 2.0.0 release, this
release is proportionally quite small: however, it has the effect of removing a
lot of cruft and complexity that has built up in the codebase over the lifetime
of the v2 release series.
This release was motivated primarily by discovering that applications that
attempted to use both HTTP/1.1 and HTTP/2 using hyper-h2 would encounter
problems with cookies, because hyper-h2 did not join together cookie headers as
required by RFC 7540. Normally adding such behaviour would be a non-breaking
change, but we previously had no flags to prevent normalization of received
HTTP headers.
Because it makes no sense for the cookie to be split *by default*, we needed to
add a controlling flag and set it to true. The breaking nature of this change
is very subtle, and it's possible most users would never notice, but
nevertheless it *is* a breaking change and we need to treat it as such.
Happily, we can take this opportunity to finalise a bunch of deprecations we'd
made over the past year. The v2 release series was long-lived and successful,
having had a series of releases across the past year-and-a-bit, and the Hyper
team are very proud of it. However, it's time to open a new chapter, and remove
the deprecated code.
The past year has been enormously productive for the Hyper team. A total of 30
v2 releases were made, an enormous amount of work. A good number of people have
made their first contribution in this time, more than I can thank reasonably
without taking up an unreasonable amount of space in this document, so instead
I invite you to check out `our awesome contributor list`_.
We're looking forward to the next chapter in hyper-h2: it's been a fun ride so
far, and we hope even more of you come along and join in the fun over the next
year!
.. _our awesome contributor list: https://github.com/python-hyper/hyper-h2/graphs/contributors
2.0.0: 25 January 2016
~~~~~~~~~~~~~~~~~~~~~~
The Hyper-h2 team and the Hyper project are delighted to announce the release
of Hyper-h2 version 2.0.0! This is an enormous release that contains a gigantic
collection of new features and fixes, with the goal of making it easier than
ever to use Hyper-h2 to build a compliant HTTP/2 server or client.
An enormous chunk of this work has been focused on tighter enforcement of
restrictions in RFC 7540, ensuring that we correctly police the actions of
remote peers, and error appropriately when those peers violate the
specification. Several of these constitute breaking changes, because data that
was previously received and handled without obvious error now raises
``ProtocolError`` exceptions and causes the connection to be terminated.
Additionally, the public API was cleaned up and had several helper methods that
had been inadvertently exposed removed from the public API. The team wants to
stress that while Hyper-h2 follows semantic versioning, the guarantees of
semver apply only to the public API as documented in :doc:`api`. Reducing the
surface area of these APIs makes it easier for us to continue to ensure that
the guarantees of semver are respected on our public API.
We also attempted to clear up some of the warts that had appeared in the API,
and add features that are helpful for implementing HTTP/2 endpoints. For
example, the :class:`H2Connection <h2.connection.H2Connection>` object now
exposes a method for generating the next stream ID that your client or server
can use to initiate a connection (:meth:`get_next_available_stream_id
<h2.connection.H2Connection.get_next_available_stream_id>`). We also removed
some needless return values that were guaranteed to return empty lists, which
were an attempt to make a forward-looking guarantee that was entirely unneeded.
Altogether, this has been an extremely productive period for Hyper-h2, and a
lot of great work has been done by the community. To that end, we'd also like
to extend a great thank-you to those contributors who made their first contribution
to the project between release 1.0.0 and 2.0.0. Many thanks to:
`Thomas Kriechbaumer`_, `Alex Chan`_, `Maximilian Hils`_, and `Glyph`_. For a
full historical list of contributors, see :doc:`contributors`.
We're looking forward to the next few months of Python HTTP/2 work, and hoping
that you'll find lots of excellent HTTP/2 applications to build with Hyper-h2!
.. _Thomas Kriechbaumer: https://github.com/Kriechi
.. _Alex Chan: https://github.com/alexwlchan
.. _Maximilian Hils: https://github.com/mhils
.. _Glyph: https://github.com/glyph
.. _detailed-release-notes:
.. include:: ../../HISTORY.rst

Просмотреть файл

@ -1,56 +0,0 @@
Release Process
===============
Because of Hyper-h2's place at the bottom of the dependency tree, it is
extremely important that the project maintains a diligent release schedule.
This document outlines our process for managing releases.
Versioning
----------
Hyper-h2 follows `semantic versioning`_ of its public API when it comes to
numbering releases. The public API of Hyper-h2 is strictly limited to the
entities listed in the :doc:`api` documentation: anything not mentioned in that
document is not considered part of the public API and is not covered by the
versioning guarantees given by semantic versioning.
Maintenance
-----------
Hyper-h2 has the notion of a "release series", given by a major and minor
version number: for example, there is the 2.1 release series. When each minor
release is made and a release series is born, a branch is made off the release
tag: for example, for the 2.1 release series, the 2.1.X branch.
All changes merged into the master branch will be evaluated for whether they
can be considered 'bugfixes' only (that is, they do not affect the public API).
If they can, they will also be cherry-picked back to all active maintenance
branches that require the bugfix. If the bugfix is not necessary, because the
branch in question is unaffected by that bug, the bugfix will not be
backported.
Supported Release Series'
-------------------------
The developers of Hyper-h2 commit to supporting the following release series:
- The most recent, as identified by the first two numbers in the highest
version currently released.
- The immediately prior release series.
The only exception to this policy is that no release series earlier than the
2.1 series will be supported. In this context, "supported" means that they will
continue to receive bugfix releases.
For releases other than the ones identified above, no support is guaranteed.
The developers may *choose* to support such a release series, but they do not
promise to.
The exception here is for security vulnerabilities. If a security vulnerability
is identified in an out-of-support release series, the developers will do their
best to patch it and issue an emergency release. For more information, see
`our security documentation`_.
.. _semantic versioning: http://semver.org/
.. _our security documentation: http://python-hyper.org/en/latest/security.html

Просмотреть файл

@ -1,9 +0,0 @@
Testimonials
============
Glyph Lefkowitz
~~~~~~~~~~~~~~~
Frankly, Hyper-h2 is almost SURREAL in how well-factored and decoupled the implementation is from I/O. If libraries in the Python ecosystem looked like this generally, Twisted would be a much better platform than it is. (Frankly, most of Twisted's _own_ protocol implementations should aspire to such cleanliness.)
(`Source <https://twistedmatrix.com/pipermail/twisted-python/2015-November/029894.html>`_)

Просмотреть файл

@ -1,16 +0,0 @@
Tornado Example Server
======================
This example is a basic HTTP/2 server written using the `Tornado`_ asynchronous
networking library.
The server returns the request headers as a JSON document to the caller, just
like the example from the :doc:`basic-usage` document.
.. literalinclude:: ../../examples/tornado/tornado-server.py
:language: python
:linenos:
:encoding: utf-8
.. _Tornado: http://www.tornadoweb.org/

Просмотреть файл

@ -1,18 +0,0 @@
Twisted Example Server
======================
This example is a basic HTTP/2 server written for the `Twisted`_ asynchronous
networking framework. This is a relatively fleshed out example, and in
particular it makes sure to obey HTTP/2 flow control rules.
This server differs from some of the other example servers by serving files,
rather than simply sending JSON responses. This makes the example lengthier,
but also brings it closer to a real-world use-case.
.. literalinclude:: ../../examples/twisted/twisted-server.py
:language: python
:linenos:
:encoding: utf-8
.. _Twisted: https://twistedmatrix.com/

Просмотреть файл

@ -1,17 +0,0 @@
Twisted Example Client: Head Requests
=====================================
This example is a basic HTTP/2 client written for the `Twisted`_ asynchronous
networking framework.
This client is fairly simple: it makes a hard-coded HEAD request to
http2bin.org and prints out the response data. Its purpose is to demonstrate
how to write a very basic HTTP/2 client implementation.
.. literalinclude:: ../../examples/twisted/head_request.py
:language: python
:linenos:
:encoding: utf-8
.. _Twisted: https://twistedmatrix.com/

Просмотреть файл

@ -1,18 +0,0 @@
Twisted Example Client: Post Requests
=====================================
This example is a basic HTTP/2 client written for the `Twisted`_ asynchronous
networking framework.
This client is fairly simple: it makes a hard-coded POST request to
http2bin.org and prints out the response data, sending a file that is provided
on the command line or the script itself. Its purpose is to demonstrate how to
write a HTTP/2 client implementation that handles flow control.
.. literalinclude:: ../../examples/twisted/post_request.py
:language: python
:linenos:
:encoding: utf-8
.. _Twisted: https://twistedmatrix.com/

Просмотреть файл

@ -1,23 +0,0 @@
Example HTTP/2-only WSGI Server
===============================
This example is a more complex HTTP/2 server that acts as a WSGI server,
passing data to an arbitrary WSGI application. This example is written using
`asyncio`_. The server supports most of PEP-3333, and so could in principle be
used as a production WSGI server: however, that's *not recommended* as certain
shortcuts have been taken to ensure ease of implementation and understanding.
The main advantages of this example are:
1. It properly demonstrates HTTP/2 flow control management.
2. It demonstrates how to plug hyper-h2 into a larger, more complex
application.
.. literalinclude:: ../../examples/asyncio/wsgi-server.py
:language: python
:linenos:
:encoding: utf-8
.. _asyncio: https://docs.python.org/3/library/asyncio.html

Просмотреть файл

@ -1,155 +0,0 @@
# -*- coding: utf-8 -*-
"""
asyncio-server.py
~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server using asyncio. Requires Python 3.5+.
This example demonstrates handling requests with bodies, as well as handling
those without. In particular, it demonstrates the fact that DataReceived may
be called multiple times, and that applications must handle that possibility.
Please note that this example does not handle flow control, and so only works
properly for relatively small requests. Please see other examples to understand
how flow control should work.
"""
import asyncio
import io
import json
import ssl
import collections
from typing import List, Tuple
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import (
ConnectionTerminated, DataReceived, RequestReceived, StreamEnded
)
from h2.errors import ErrorCodes
from h2.exceptions import ProtocolError
# Per-stream request state: the request headers plus a BytesIO buffer that
# accumulates the body as DataReceived events arrive (see receive_data below).
RequestData = collections.namedtuple('RequestData', ['headers', 'data'])
class H2Protocol(asyncio.Protocol):
    """A minimal HTTP/2 server protocol for asyncio.

    Echoes each request's headers and body back to the client as a JSON
    document. Flow control is deliberately not handled (see the module
    docstring), so only requests that fit inside the default flow-control
    window are handled reliably.
    """

    def __init__(self):
        config = H2Configuration(client_side=False, header_encoding='utf-8')
        self.conn = H2Connection(config=config)
        self.transport = None
        # Per-stream RequestData records, keyed by stream ID.
        self.stream_data = {}

    def connection_made(self, transport: asyncio.Transport):
        """Save the transport and perform the HTTP/2 connection preamble."""
        self.transport = transport
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def data_received(self, data: bytes):
        """Feed inbound bytes to the h2 state machine and dispatch events."""
        try:
            events = self.conn.receive_data(data)
        except ProtocolError:
            # The peer violated the protocol: flush whatever the state
            # machine queued (e.g. a GOAWAY) and drop the connection.
            self.transport.write(self.conn.data_to_send())
            self.transport.close()
        else:
            self.transport.write(self.conn.data_to_send())
            for event in events:
                if isinstance(event, RequestReceived):
                    self.request_received(event.headers, event.stream_id)
                elif isinstance(event, DataReceived):
                    self.receive_data(event.data, event.stream_id)
                elif isinstance(event, StreamEnded):
                    self.stream_complete(event.stream_id)
                elif isinstance(event, ConnectionTerminated):
                    self.transport.close()

            # Flush anything the event handlers queued (responses, resets).
            self.transport.write(self.conn.data_to_send())

    def request_received(self, headers: List[Tuple[str, str]], stream_id: int):
        """Validate the request method and set up per-stream state."""
        headers = collections.OrderedDict(headers)
        method = headers[':method']

        # We only support GET and POST.
        if method not in ('GET', 'POST'):
            self.return_405(headers, stream_id)
            return

        # Store off the request data; the body accumulates in the BytesIO
        # buffer as DataReceived events come in.
        request_data = RequestData(headers, io.BytesIO())
        self.stream_data[stream_id] = request_data

    def stream_complete(self, stream_id: int):
        """The request is fully received: send the JSON echo response."""
        try:
            request_data = self.stream_data[stream_id]
        except KeyError:
            # Just return, we probably 405'd this already.
            return

        headers = request_data.headers
        body = request_data.data.getvalue().decode('utf-8')

        data = json.dumps(
            {"headers": headers, "body": body}, indent=4
        ).encode("utf8")

        response_headers = (
            (':status', '200'),
            ('content-type', 'application/json'),
            ('content-length', str(len(data))),
            ('server', 'asyncio-h2'),
        )
        self.conn.send_headers(stream_id, response_headers)
        self.conn.send_data(stream_id, data, end_stream=True)

    def return_405(self, headers: List[Tuple[str, str]], stream_id: int):
        """Reject an unsupported method with an empty 405 response."""
        response_headers = (
            (':status', '405'),
            ('content-length', '0'),
            ('server', 'asyncio-h2'),
        )
        self.conn.send_headers(stream_id, response_headers, end_stream=True)

    def receive_data(self, data: bytes, stream_id: int):
        """Buffer request-body data, or reset streams we never accepted.

        Data arriving for an unknown stream (e.g. one that was already
        405'd and removed) is answered with a stream reset rather than
        being buffered.
        """
        try:
            stream_data = self.stream_data[stream_id]
        except KeyError:
            self.conn.reset_stream(
                stream_id, error_code=ErrorCodes.PROTOCOL_ERROR
            )
        else:
            stream_data.data.write(data)
# TLS setup. HTTP/2 over TLS requires a modern configuration: disable old
# TLS versions and TLS compression, and advertise "h2" via ALPN so clients
# know they may speak HTTP/2 on this connection.
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.options |= (
    ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
)
ssl_context.set_ciphers("ECDHE+AESGCM")
# Expects a certificate/key pair alongside this script.
ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
ssl_context.set_alpn_protocols(["h2"])
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass
# Close the server and wait for its socket to shut down cleanly.
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()

Просмотреть файл

@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDhTCCAm2gAwIBAgIJAOrxh0dOYJLdMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA5MTkxNDE2
NDRaFw0xNTEwMTkxNDE2NDRaMFkxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21l
LVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxEjAQBgNV
BAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMqt
A1iu8EN00FU0eBcBGlLVmNEgV7Jkbukra+kwS8j/U2y50QPGJc/FiIVDfuBqk5dL
ACTNc6A/FQcXvWmOc5ixmC3QKKasMpuofqKz0V9C6irZdYXZ9rcsW0gHQIr989yd
R+N1VbIlEVW/T9FJL3B2UD9GVIkUELzm47CSOWZvAxQUlsx8CUNuUCWqyZJoqTFN
j0LeJDOWGCsug1Pkj0Q1x+jMVL6l6Zf6vMkLNOMsOsWsxUk+0L3tl/OzcTgUOCsw
UzY59RIi6Rudrp0oaU8NuHr91yiSqPbKFlX10M9KwEEdnIpcxhND3dacrDycj3ux
eWlqKync2vOFUkhwiaMCAwEAAaNQME4wHQYDVR0OBBYEFA0PN+PGoofZ+QIys2Jy
1Zz94vBOMB8GA1UdIwQYMBaAFA0PN+PGoofZ+QIys2Jy1Zz94vBOMAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEplethBoPpcP3EbR5Rz6snDDIcbtAJu
Ngd0YZppGT+P0DYnPJva4vRG3bb84ZMSuppz5j67qD6DdWte8UXhK8BzWiHzwmQE
QmbKyzzTMKQgTNFntpx5cgsSvTtrHpNYoMHzHOmyAOboNeM0DWiRXsYLkWTitLTN
qbOpstwPubExbT9lPjLclntShT/lCupt+zsbnrR9YiqlYFY/fDzfAybZhrD5GMBY
XdMPItwAc/sWvH31yztarjkLmld76AGCcO5r8cSR/cX98SicyfjOBbSco8GkjYNY
582gTPkKGYpStuN7GNT5tZmxvMq935HRa2XZvlAIe8ufp8EHVoYiF3c=
-----END CERTIFICATE-----

Просмотреть файл

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAyq0DWK7wQ3TQVTR4FwEaUtWY0SBXsmRu6Str6TBLyP9TbLnR
A8Ylz8WIhUN+4GqTl0sAJM1zoD8VBxe9aY5zmLGYLdAopqwym6h+orPRX0LqKtl1
hdn2tyxbSAdAiv3z3J1H43VVsiURVb9P0UkvcHZQP0ZUiRQQvObjsJI5Zm8DFBSW
zHwJQ25QJarJkmipMU2PQt4kM5YYKy6DU+SPRDXH6MxUvqXpl/q8yQs04yw6xazF
ST7Qve2X87NxOBQ4KzBTNjn1EiLpG52unShpTw24ev3XKJKo9soWVfXQz0rAQR2c
ilzGE0Pd1pysPJyPe7F5aWorKdza84VSSHCJowIDAQABAoIBACp+nh4BB/VMz8Wd
q7Q/EfLeQB1Q57JKpoqTBRwueSVai3ZXe4CMEi9/HkG6xiZtkiZ9njkZLq4hq9oB
2z//kzMnwV2RsIRJxI6ohGy+wR51HD4BvEdlTPpY/Yabpqe92VyfSYxidKZWaU0O
QMED1EODOw4ZQ+4928iPrJu//PMB4e7TFao0b9Fk/XLWtu5/tQZz9jsrlTi1zthh
7n+oaGNhfTeIJJL4jrhTrKW1CLHXATtr9SJlfZ3wbMxQVeyj2wUlP1V0M6kBuhNj
tbGbMpixD5iCNJ49Cm2PHg+wBOfS3ADGIpi3PcGw5mb8nB3N9eGBRPhLShAlq5Hi
Lv4tyykCgYEA8u3b3xJ04pxWYN25ou/Sc8xzgDCK4XvDNdHVTuZDjLVA+VTVPzql
lw7VvJArsx47MSPvsaX/+4hQXYtfnR7yJpx6QagvQ+z4ludnIZYrQwdUmb9pFL1s
8UNj+3j9QFRPenIiIQ8qxxNIQ9w2HsVQ8scvc9CjYop/YYAPaQyHaL8CgYEA1ZSz
CR4NcpfgRSILdhb1dLcyw5Qus1VOSAx3DYkhDkMiB8XZwgMdJjwehJo9yaqRCLE8
Sw5znMnkfoZpu7+skrjK0FqmMpXMH9gIszHvFG8wSw/6+2HIWS19/wOu8dh95LuC
0zurMk8rFqxgWMWF20afhgYrUz42cvUTo10FVB0CgYEAt7mW6W3PArfUSCxIwmb4
VmXREKkl0ATHDYQl/Cb//YHzot467TgQll883QB4XF5HzBFurX9rSzO7/BN1e6I0
52i+ubtWC9xD4fUetXMaQvZfUGxIL8xXgVxDWKQXfLiG54c8Mp6C7s6xf8kjEUCP
yR1F0SSA/Pzb+8RbY0p7eocCgYA+1rs+SXtHZev0KyoYGnUpW+Uxqd17ofOgOxqj
/t6c5Z+TjeCdtnDTGQkZlo/rT6XQWuUUaDIXxUbW+xEMzj4mBPyXBLS1WWFvVQ5q
OpzO9E/PJeqAH6rkof/aEelc+oc/zvOU1o9uA+D3kMvgEm1psIOq2RHSMhGvDPA0
NmAk+QKBgQCwd1681GagdIYSZUCBecnLtevXmIsJyDW2yR1NNcIe/ukcVQREMDvy
5DDkhnGDgnV1D5gYcXb34g9vYvbfTnBMl/JXmMAAG1kIS+3pvHyN6f1poVe3yJV1
yHVuvymnJxKnyaV0L3ntepVvV0vVNIkA3oauoUTLto6txBI+b/ImDA==
-----END RSA PRIVATE KEY-----

Просмотреть файл

@ -1,760 +0,0 @@
# -*- coding: utf-8 -*-
"""
asyncio-server.py
~~~~~~~~~~~~~~~~~
A fully-functional WSGI server, written using hyper-h2. Requires asyncio.
To test it, try installing httpbin from pip (``pip install httpbin``) and then
running the server (``python asyncio-server.py httpbin:app``).
This server does not support HTTP/1.1: it is a HTTP/2-only WSGI server. The
purpose of this code is to demonstrate how to integrate hyper-h2 into a more
complex application, and to demonstrate several principles of concurrent
programming.
The architecture looks like this:
+---------------------------------+
| 1x HTTP/2 Server Thread |
| (running asyncio) |
+---------------------------------+
+---------------------------------+
| N WSGI Application Threads |
| (no asyncio) |
+---------------------------------+
Essentially, we spin up an asyncio-based event loop in the main thread. This
launches one HTTP/2 Protocol instance for each inbound connection, all of which
will read and write data from within the main thread in an asynchronous manner.
When each HTTP request comes in, the server will build the WSGI environment
dictionary and create a ``Stream`` object. This object will hold the relevant
state for the request/response pair and will act as the WSGI side of the logic.
That object will then be passed to a background thread pool, and when a worker
is available the WSGI logic will begin to be executed. This model ensures that
the asyncio web server itself is never blocked by the WSGI application.
The WSGI application and the HTTP/2 server communicate via an asyncio queue,
together with locks and threading events. The locks themselves are implicit in
asyncio's "call_soon_threadsafe", which allows for a background thread to
register an action with the main asyncio thread. When the asyncio thread
eventually takes the action in question it sets a threading event, signaling
to the background thread that it is free to continue its work.
To make the WSGI application work with flow control, there is a very important
invariant that must be observed. Any WSGI action that would cause data to be
emitted to the network MUST be accompanied by a threading Event that is not
set until that data has been written to the transport. This ensures that the
WSGI application *blocks* until the data is actually sent. The reason we
require this invariant is that the HTTP/2 server may choose to re-order some
data chunks for flow control reasons: that is, the application for stream X may
have actually written its data first, but the server may elect to send the data
for stream Y first. This means that it's vital that there not be *two* writes
for stream X active at any one point or they may get reordered, which would be
particularly terrible.
Thus, the server must cooperate to ensure that each threading event only fires
when the *complete* data for that event has been written to the asyncio
transport. Any earlier will cause untold craziness.
"""
import asyncio
import importlib
import queue
import ssl
import sys
import threading
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import (
DataReceived, RequestReceived, WindowUpdated, StreamEnded, StreamReset
)
# Used to signal that a request has completed.
#
# This is a convenient way to do "in-band" signaling of stream completion
# without doing anything so heavyweight as using a class. Essentially, we can
# test identity against this empty object. In fact, this is so convenient that
# we use this object for all streams, for data in both directions: in and out.
# (See the sending loop and the Stream read path, which both compare against
# it with ``is``.)
END_DATA_SENTINEL = object()
# The WSGI callable. Stored here so that the protocol instances can get hold
# of the data. Presumably assigned by the script entry point before the
# server starts — not visible in this chunk.
APPLICATION = None
class H2Protocol(asyncio.Protocol):
    """Server-side HTTP/2 protocol bridging asyncio to threaded WSGI apps.

    One instance exists per inbound connection. Everything here runs on the
    asyncio thread, except the explicitly thread-safe entry points used by
    the WSGI worker threads: ``data_for_stream``, ``send_response`` and
    ``open_flow_control_window``.
    """

    def __init__(self):
        config = H2Configuration(client_side=False, header_encoding='utf-8')

        # Our server-side state machine.
        self.conn = H2Connection(config=config)

        # The backing transport.
        self.transport = None

        # A dictionary of ``Stream`` objects, keyed by their stream ID. This
        # makes it easy to route data to the correct WSGI application
        # instance.
        self.streams = {}

        # A queue of data emitted by WSGI applications that has not yet been
        # sent. Each stream may only have one chunk of data in either this
        # queue or the _flow_controlled_data dictionary at any one time.
        self._stream_data = asyncio.Queue()

        # Data that has been pulled off the queue but belongs to a stream
        # currently blocked behind flow control. Chunks parked here go back
        # onto the queue when the relevant window opens again; this avoids
        # spinning on the queue for data that cannot be sent.
        self._flow_controlled_data = {}

        # A reference to the loop in which this protocol runs. This is
        # needed to synchronise up with background threads.
        self._loop = asyncio.get_event_loop()

        # Any streams that have been remotely reset. We keep track of these
        # to ensure that we don't emit data from a WSGI application whose
        # stream has been cancelled.
        self._reset_streams = set()

        # Keep track of the loop sending task so we can kill it when the
        # connection goes away.
        self._send_loop_task = None

    def connection_made(self, transport):
        """Save the transport, do the HTTP/2 preamble, start the send loop."""
        self.transport = transport
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())
        self._send_loop_task = self._loop.create_task(self.sending_loop())

    def connection_lost(self, exc):
        """The connection is gone: cancel the data-sending coroutine."""
        self._send_loop_task.cancel()

    def data_received(self, data):
        """Feed inbound bytes to the h2 state machine and dispatch events."""
        events = self.conn.receive_data(data)

        for event in events:
            if isinstance(event, RequestReceived):
                self.request_received(event)
            elif isinstance(event, DataReceived):
                self.data_frame_received(event)
            elif isinstance(event, WindowUpdated):
                self.window_opened(event)
            elif isinstance(event, StreamEnded):
                self.end_stream(event)
            elif isinstance(event, StreamReset):
                self.reset_stream(event)

        outbound_data = self.conn.data_to_send()
        if outbound_data:
            self.transport.write(outbound_data)

    def window_opened(self, event):
        """
        The flow control window got opened.

        This is important because it's possible that we were unable to send
        some WSGI data because the flow control window was too small. If
        that happens, the sending_loop coroutine starts buffering data.

        As the window gets opened, we need to unbuffer the data. We do that
        by placing the data chunks back on the back of the send queue and
        letting the sending loop take another shot at sending them.

        This system only works because we require that each stream only have
        *one* data chunk in the sending queue at any time. The threading
        events force this invariant to remain true.
        """
        if event.stream_id:
            # This is specific to a single stream.
            if event.stream_id in self._flow_controlled_data:
                self._stream_data.put_nowait(
                    self._flow_controlled_data.pop(event.stream_id)
                )
        else:
            # This event is specific to the connection. Free up *all* the
            # streams. This is a bit tricky, but we *must not* yield the
            # flow of control here or it all goes wrong.
            for data in self._flow_controlled_data.values():
                self._stream_data.put_nowait(data)

            self._flow_controlled_data = {}

    @asyncio.coroutine
    def sending_loop(self):
        """
        A call that loops forever, attempting to send data. This sending
        loop contains most of the flow-control smarts of this class: it
        pulls data off of the asyncio queue and then attempts to send it.

        The difficulties here are all around flow control. Specifically, a
        chunk of data may be too large to send. In this case, what will
        happen is that this coroutine will attempt to send what it can and
        will then store the unsent data locally. When a flow control event
        comes in that data will be freed up and placed back onto the asyncio
        queue, causing it to pop back up into the sending logic of this
        coroutine.

        This method explicitly *does not* handle HTTP/2 priority.

        This coroutine explicitly *does not end*.
        """
        while True:
            stream_id, data, event = yield from self._stream_data.get()

            # If this stream got reset, just drop the data on the floor.
            # Note that we need to set the event here to make sure that the
            # application doesn't lock up.
            if stream_id in self._reset_streams:
                event.set()
                # BUGFIX: without this ``continue``, the chunk fell through
                # and was still sent on a stream the peer had already reset.
                continue

            # Check if the body is done. If it is, this is really easy!
            # Again, we *must* set the event here or the application will
            # lock up.
            if data is END_DATA_SENTINEL:
                self.conn.end_stream(stream_id)
                self.transport.write(self.conn.data_to_send())
                event.set()
                continue

            # We need to send data, but not to exceed the flow control
            # window. For that reason, grab only the data that fits: we'll
            # buffer the rest.
            window_size = self.conn.local_flow_control_window(stream_id)
            chunk_size = min(window_size, len(data))
            data_to_send = data[:chunk_size]
            data_to_buffer = data[chunk_size:]

            if data_to_send:
                # There's a maximum frame size we have to respect. Because
                # we aren't paying any attention to priority here, we can
                # quite safely just split this string up into chunks of max
                # frame size and blast them out.
                #
                # In a *real* application you'd want to consider priority
                # here.
                max_size = self.conn.max_outbound_frame_size
                chunks = (
                    data_to_send[x:x+max_size]
                    for x in range(0, len(data_to_send), max_size)
                )
                for chunk in chunks:
                    self.conn.send_data(stream_id, chunk)
                self.transport.write(self.conn.data_to_send())

            # If there's data left to buffer, we should do that. Put it in a
            # dictionary and *don't set the event*: the app must not
            # generate any more data until we got rid of all of this data.
            if data_to_buffer:
                self._flow_controlled_data[stream_id] = (
                    stream_id, data_to_buffer, event
                )
            else:
                # We sent everything. We can let the WSGI app progress.
                event.set()

    def request_received(self, event):
        """
        A HTTP/2 request has been received. We need to invoke the WSGI
        application in a background thread to handle it.
        """
        # First, we are going to want an object to hold all the relevant
        # state for this request/response. For that, we have a stream
        # object. We need to store the stream object somewhere reachable for
        # when data arrives later.
        s = Stream(event.stream_id, self)
        self.streams[event.stream_id] = s

        # Next, we need to build the WSGI environ dictionary.
        environ = _build_environ_dict(event.headers, s)

        # Finally, we want to throw these arguments out to a threadpool and
        # let it run: the WSGI app must never block the asyncio thread.
        self._loop.run_in_executor(
            None,
            s.run_in_threadpool,
            APPLICATION,
            environ,
        )

    def data_frame_received(self, event):
        """
        Data has been received by WSGI server and needs to be dispatched to
        a running application.

        Note that the flow control window is not modified here. That's
        deliberate: see Stream.__next__ for a longer discussion of why.
        """
        # Grab the stream in question from our dictionary and pass it on.
        stream = self.streams[event.stream_id]
        stream.receive_data(event.data, event.flow_controlled_length)

    def end_stream(self, event):
        """
        The stream data is complete.
        """
        stream = self.streams[event.stream_id]
        stream.request_complete()

    def reset_stream(self, event):
        """
        A stream got forcefully reset.

        This is a tricky thing to deal with because WSGI doesn't really have
        a good notion for it. Essentially, you have to let the application
        run until completion, but not actually let it send any data.

        We do that by discarding any data we currently have for it, and then
        marking the stream as reset to allow us to spot when that stream is
        trying to send data and drop that data on the floor.

        We then *also* signal the WSGI application that no more data is
        incoming, to ensure that it does not attempt to do further reads of
        the data.
        """
        if event.stream_id in self._flow_controlled_data:
            # BUGFIX: delete only this stream's parked chunk. The original
            # code deleted the whole ``_flow_controlled_data`` attribute,
            # which would make any later access raise AttributeError.
            del self._flow_controlled_data[event.stream_id]

        self._reset_streams.add(event.stream_id)
        self.end_stream(event)

    def data_for_stream(self, stream_id, data):
        """
        Thread-safe method called from outside the main asyncio thread in
        order to send data on behalf of a WSGI application.

        Places data being written by a stream on an asyncio queue. Returns a
        threading event that will fire when that data is sent.
        """
        event = threading.Event()
        self._loop.call_soon_threadsafe(
            self._stream_data.put_nowait,
            (stream_id, data, event)
        )
        return event

    def send_response(self, stream_id, headers):
        """
        Thread-safe method called from outside the main asyncio thread in
        order to send the HTTP response headers on behalf of a WSGI
        application.

        Returns a threading event that will fire when the headers have been
        emitted to the network.
        """
        event = threading.Event()

        def _inner_send(stream_id, headers, event):
            self.conn.send_headers(stream_id, headers, end_stream=False)
            self.transport.write(self.conn.data_to_send())
            event.set()

        self._loop.call_soon_threadsafe(
            _inner_send,
            stream_id,
            headers,
            event
        )
        return event

    def open_flow_control_window(self, stream_id, increment):
        """
        Opens a flow control window for the given stream by the given
        amount. Called from a WSGI thread. Does not return an event because
        there's no need to block on this action, it may take place at any
        time.
        """
        def _inner_open(stream_id, increment):
            # Reopen both the stream-level and the connection-level window.
            self.conn.increment_flow_control_window(increment, stream_id)
            self.conn.increment_flow_control_window(increment, None)
            self.transport.write(self.conn.data_to_send())

        self._loop.call_soon_threadsafe(
            _inner_open,
            stream_id,
            increment,
        )
class Stream:
"""
This class holds all of the state for a single stream. It also provides
several of the callables used by the WSGI application. Finally, it provides
the logic for actually interfacing with the WSGI application.
For these reasons, the object has *strict* requirements on thread-safety.
While the object can be initialized in the main WSGI thread, the
``run_in_threadpool`` method *must* be called from outside that thread. At
that point, the main WSGI thread may only call specific methods.
"""
def __init__(self, stream_id, protocol):
self.stream_id = stream_id
self._protocol = protocol
# Queue for data that has been received from the network. This is a
# thread-safe queue, to allow both the WSGI application to block on
# receiving more data and to allow the asyncio server to keep sending
# more data.
#
# This queue is unbounded in size, but in practice it cannot contain
# too much data because the flow control window doesn't get adjusted
# unless data is removed from it.
self._received_data = queue.Queue()
# This buffer is used to hold partial chunks of data from
# _received_data that were not returned out of ``read`` and friends.
self._temp_buffer = b''
# Temporary variables that allow us to keep hold of the headers and
# response status until such time as the application needs us to send
# them.
self._response_status = b''
self._response_headers = []
self._headers_emitted = False
# Whether the application has received all the data from the network
# or not. This allows us to short-circuit some reads.
self._complete = False
def receive_data(self, data, flow_controlled_size):
"""
Called by the H2Protocol when more data has been received from the
network.
Places the data directly on the queue in a thread-safe manner without
blocking. Does not introspect or process the data.
"""
self._received_data.put_nowait((data, flow_controlled_size))
def request_complete(self):
"""
Called by the H2Protocol when all the request data has been received.
This works by placing the ``END_DATA_SENTINEL`` on the queue. The
reading code knows, when it sees the ``END_DATA_SENTINEL``, to expect
no more data from the network. This ensures that the state of the
application only changes when it has finished processing the data from
the network, even though the server may have long-since finished
receiving all the data for this request.
"""
self._received_data.put_nowait((END_DATA_SENTINEL, None))
def run_in_threadpool(self, wsgi_application, environ):
"""
This method should be invoked in a threadpool. At the point this method
is invoked, the only safe methods to call from the original thread are
``receive_data`` and ``request_complete``: any other method is unsafe.
This method handles the WSGI logic. It invokes the application callable
in this thread, passing control over to the WSGI application. It then
ensures that the data makes it back to the HTTP/2 connection via
the thread-safe APIs provided below.
"""
result = wsgi_application(environ, self.start_response)
try:
for data in result:
self.write(data)
finally:
# This signals that we're done with data. The server will know that
# this allows it to clean up its state: we're done here.
self.write(END_DATA_SENTINEL)
# The next few methods are called by the WSGI application. Firstly, the
# three methods provided by the input stream.
def read(self, size=None):
"""
Called by the WSGI application to read data.
This method is the one of two that explicitly pumps the input data
queue, which means it deals with the ``_complete`` flag and the
``END_DATA_SENTINEL``.
"""
# If we've already seen the END_DATA_SENTINEL, return immediately.
if self._complete:
return b''
# If we've been asked to read everything, just iterate over ourselves.
if size is None:
return b''.join(self)
# Otherwise, as long as we don't have enough data, spin looking for
# another data chunk.
data = b''
while len(data) < size:
try:
chunk = next(self)
except StopIteration:
break
# Concatenating strings this way is slow, but that's ok, this is
# just a demo.
data += chunk
# We have *at least* enough data to return, but we may have too much.
# If we do, throw it on a buffer: we'll use it later.
to_return = data[:size]
self._temp_buffer = data[size:]
return to_return
def readline(self, hint=None):
"""
Called by the WSGI application to read a single line of data.
This method rigorously observes the ``hint`` parameter: it will only
ever read that much data. It then splits the data on a newline
character and throws everything it doesn't need into a buffer.
"""
data = self.read(hint)
first_newline = data.find(b'\n')
if first_newline == -1:
# No newline, return all the data
return data
# We want to slice the data so that the head *includes* the first
# newline. Then, any data left in this line we don't care about should
# be prepended to the internal buffer.
head, tail = data[:first_newline + 1], data[first_newline + 1:]
self._temp_buffer = tail + self._temp_buffer
return head
def readlines(self, hint=None):
    """
    Called by the WSGI application to read several lines of data.

    Strictly observes ``hint`` and returns the data it read split into
    newline-terminated lines.
    """
    # This is wildly inefficient, but then calling readlines on a request
    # body is rarely a good idea to begin with.
    raw = self.read(hint)
    lines = [segment + b'\n' for segment in raw.split(b'\n')]
    # If the data ended with a newline, the split produced a spurious
    # empty trailing segment (now a bare newline): drop it.
    if lines[-1] == b'\n':
        del lines[-1]
    return lines
def start_response(self, status, response_headers, exc_info=None):
    """
    This is the PEP-3333 mandated start_response callable.
    All it does is store the headers for later sending, and return our
    ``write`` callable.
    """
    # Per PEP 3333: if headers have already hit the wire and the app is
    # reporting an error, we must re-raise it.
    if self._headers_emitted and exc_info is not None:
        raise exc_info[1].with_traceback(exc_info[2])
    # A second call without exc_info is a protocol violation.
    assert not self._response_status or exc_info is not None
    self._response_status = status
    self._response_headers = response_headers
    return self.write
def write(self, data):
    """
    Provides some data to write.
    This function *blocks* until such time as the data is allowed by
    HTTP/2 flow control. This allows a client to slow or pause the response
    as needed.
    This function is not supposed to be used, according to PEP-3333, but
    once we have it it becomes quite convenient to use it, so this app
    actually runs all writes through this function.
    """
    # Headers must be on the wire before any body bytes.
    if not self._headers_emitted:
        self._emit_headers()
    # data_for_stream hands back an event that fires once the bytes have
    # cleared HTTP/2 flow control; block on it.
    event = self._protocol.data_for_stream(self.stream_id, data)
    event.wait()
    return
def _emit_headers(self):
    """
    Sends the response headers.
    This is only called from the write callable and should only ever be
    called once. It does some minor processing (converts the status line
    into a status code because reason phrases are evil) and then passes
    the headers on to the server. This call explicitly blocks until the
    server notifies us that the headers have reached the network.
    """
    assert self._response_status and self._response_headers
    assert not self._headers_emitted
    self._headers_emitted = True
    # We only need the status code: HTTP/2 has no reason phrase.
    status = self._response_status.split(" ", 1)[0]
    headers = [(":status", status)]
    headers.extend(self._response_headers)
    event = self._protocol.send_response(self.stream_id, headers)
    event.wait()
    return
# These two methods implement the iterator protocol. This allows a WSGI
# application to iterate over this Stream object to get the data.
def __iter__(self):
    # The stream is its own iterator; __next__ yields body chunks.
    return self
def __next__(self):
    """
    Produce the next chunk of request body data (iterator protocol),
    blocking on the internal queue until a chunk arrives or the request
    body ends.
    """
    # If the complete request has been read, abort immediately.
    if self._complete:
        raise StopIteration()
    # If we have data stored in a temporary buffer for any reason, return
    # that and clear the buffer.
    #
    # This can actually only happen when the application uses one of the
    # read* callables, but that's fine.
    if self._temp_buffer:
        buffered_data = self._temp_buffer
        self._temp_buffer = b''
        return buffered_data
    # Otherwise, pull data off the queue (blocking as needed). If this is
    # the end of the request, we're done here: mark ourselves as complete
    # and call it time. Otherwise, open the flow control window an
    # appropriate amount and hand the chunk off.
    chunk, chunk_size = self._received_data.get()
    if chunk is END_DATA_SENTINEL:
        self._complete = True
        raise StopIteration()
    # Let's talk a little bit about why we're opening the flow control
    # window *here*, and not in the server thread.
    #
    # The purpose of HTTP/2 flow control is to allow for servers and
    # clients to avoid needing to buffer data indefinitely because their
    # peer is producing data faster than they can consume it. As a result,
    # it's important that the flow control window be opened as late in the
    # processing as possible. In this case, we open the flow control window
    # exactly when the server hands the data to the application. This means
    # that the flow control window essentially signals to the remote peer
    # how much data hasn't even been *seen* by the application yet.
    #
    # If you wanted to be really clever you could consider not opening the
    # flow control window until the application asks for the *next* chunk
    # of data. That means that any buffers at the application level are now
    # included in the flow control window processing. In my opinion, the
    # advantage of that process does not outweigh the extra logical
    # complexity involved in doing it, so we don't bother here.
    #
    # Another note: you'll notice that we don't include the _temp_buffer in
    # our flow control considerations. This means you could in principle
    # lead us to buffer slightly more than one connection flow control
    # window's worth of data. That risk is considered acceptable for the
    # much simpler logic available here.
    #
    # Finally, this is a pretty dumb flow control window management scheme:
    # it causes us to emit a *lot* of window updates. A smarter server
    # would want to use the content-length header to determine whether
    # flow control window updates need to be emitted at all, and then to be
    # more efficient about emitting them to avoid firing them off really
    # frequently. For an example like this, there's very little gained by
    # worrying about that.
    self._protocol.open_flow_control_window(self.stream_id, chunk_size)
    return chunk
def _build_environ_dict(headers, stream):
"""
Build the WSGI environ dictionary for a given request. To do that, we'll
temporarily create a dictionary for the headers. While this isn't actually
a valid way to represent headers, we know that the special headers we need
can only have one appearance in the block.
This code is arguably somewhat incautious: the conversion to dictionary
should only happen in a way that allows us to correctly join headers that
appear multiple times. That's acceptable in a demo app: in a productised
version you'd want to fix it.
"""
header_dict = dict(headers)
path = header_dict.pop(u':path')
try:
path, query = path.split(u'?', 1)
except ValueError:
query = u""
server_name = header_dict.pop(u':authority')
try:
server_name, port = server_name.split(u':', 1)
except ValueError as e:
port = "8443"
environ = {
u'REQUEST_METHOD': header_dict.pop(u':method'),
u'SCRIPT_NAME': u'',
u'PATH_INFO': path,
u'QUERY_STRING': query,
u'SERVER_NAME': server_name,
u'SERVER_PORT': port,
u'SERVER_PROTOCOL': u'HTTP/2',
u'HTTPS': u"on",
u'SSL_PROTOCOL': u'TLSv1.2',
u'wsgi.version': (1, 0),
u'wsgi.url_scheme': header_dict.pop(u':scheme'),
u'wsgi.input': stream,
u'wsgi.errors': sys.stderr,
u'wsgi.multithread': True,
u'wsgi.multiprocess': False,
u'wsgi.run_once': False,
}
if u'content-type' in header_dict:
environ[u'CONTENT_TYPE'] = header_dict[u'content-type']
if u'content-length' in header_dict:
environ[u'CONTENT_LENGTH'] = header_dict[u'content-length']
for name, value in header_dict.items():
environ[u'HTTP_' + name.upper()] = value
return environ
# Set up the WSGI app. The application is named on the command line as
# "module.path:callable".
application_string = sys.argv[1]
path, func = application_string.split(':', 1)
module = importlib.import_module(path)
APPLICATION = getattr(module, func)

# Set up TLS
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.options |= (
    ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
)
ssl_context.set_ciphers("ECDHE+AESGCM")
ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
ssl_context.set_alpn_protocols(["h2"])

# Do the asyncio bits
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
server = loop.run_until_complete(coro)

# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass

# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()

Просмотреть файл

@ -1,208 +0,0 @@
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
"""
curio-server.py
~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server written for curio.
Requires Python 3.5+.
"""
import mimetypes
import os
import sys
from curio import Kernel, Event, spawn, socket, ssl
import h2.config
import h2.connection
import h2.events
# The maximum amount of a file we'll send in a single DATA frame.
READ_CHUNK_SIZE = 8192
def create_listening_ssl_socket(address, certfile, keyfile):
    """
    Create and return a listening TLS socket bound to ``address``,
    configured for HTTP/2 (TLS 1.2+, no compression, ALPN "h2").
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.options |= (
        ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
    )
    context.set_ciphers("ECDHE+AESGCM")
    context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    context.set_alpn_protocols(["h2"])

    listener = socket.socket()
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Wrap before bind, matching curio's socket/ssl wrappers.
    listener = context.wrap_socket(listener)
    listener.bind(address)
    listener.listen()
    return listener
async def h2_server(address, root, certfile, keyfile):
    """
    Create an HTTP/2 server at the given address, serving files from
    ``root``; each accepted connection is handled in its own curio task.
    """
    sock = create_listening_ssl_socket(address, certfile, keyfile)
    print("Now listening on %s:%d" % address)

    async with sock:
        while True:
            client, _ = await sock.accept()
            server = H2Server(client, root)
            await spawn(server.run())
class H2Server:
    """
    A basic HTTP/2 file server. This is essentially very similar to
    SimpleHTTPServer from the standard library, but uses HTTP/2 instead of
    HTTP/1.1.
    """
    def __init__(self, sock, root):
        # header_encoding='utf-8' makes h2 deliver header values as str.
        config = h2.config.H2Configuration(
            client_side=False, header_encoding='utf-8'
        )
        self.sock = sock
        self.conn = h2.connection.H2Connection(config=config)
        self.root = root
        # Maps stream_id -> curio Event; senders blocked on flow control
        # wait on these until a WINDOW_UPDATE arrives.
        self.flow_control_events = {}

    async def run(self):
        """
        Loop over the connection, managing it appropriately.
        """
        self.conn.initiate_connection()
        await self.sock.sendall(self.conn.data_to_send())

        while True:
            # 65535 is basically arbitrary here: this amounts to "give me
            # whatever data you have".
            data = await self.sock.recv(65535)
            if not data:
                break

            events = self.conn.receive_data(data)
            for event in events:
                if isinstance(event, h2.events.RequestReceived):
                    await spawn(
                        self.request_received(event.headers, event.stream_id)
                    )
                elif isinstance(event, h2.events.DataReceived):
                    # This demo serves files only: reject request bodies.
                    self.conn.reset_stream(event.stream_id)
                elif isinstance(event, h2.events.WindowUpdated):
                    await self.window_updated(event)

            await self.sock.sendall(self.conn.data_to_send())

    async def request_received(self, headers, stream_id):
        """
        Handle a request by attempting to serve a suitable file.
        """
        headers = dict(headers)
        assert headers[':method'] == 'GET'

        path = headers[':path'].lstrip('/')
        full_path = os.path.join(self.root, path)

        if not os.path.exists(full_path):
            response_headers = (
                (':status', '404'),
                ('content-length', '0'),
                ('server', 'curio-h2'),
            )
            self.conn.send_headers(
                stream_id, response_headers, end_stream=True
            )
            await self.sock.sendall(self.conn.data_to_send())
        else:
            await self.send_file(full_path, stream_id)

    async def send_file(self, file_path, stream_id):
        """
        Send a file, obeying the rules of HTTP/2 flow control.
        """
        filesize = os.stat(file_path).st_size
        content_type, content_encoding = mimetypes.guess_type(file_path)
        response_headers = [
            (':status', '200'),
            ('content-length', str(filesize)),
            ('server', 'curio-h2'),
        ]
        if content_type:
            response_headers.append(('content-type', content_type))
        if content_encoding:
            response_headers.append(('content-encoding', content_encoding))

        self.conn.send_headers(stream_id, response_headers)
        await self.sock.sendall(self.conn.data_to_send())

        with open(file_path, 'rb', buffering=0) as f:
            await self._send_file_data(f, stream_id)

    async def _send_file_data(self, fileobj, stream_id):
        """
        Send the data portion of a file. Handles flow control rules.
        """
        while True:
            # Block until the peer grants us some send window.
            while not self.conn.local_flow_control_window(stream_id):
                await self.wait_for_flow_control(stream_id)

            chunk_size = min(
                self.conn.local_flow_control_window(stream_id),
                READ_CHUNK_SIZE,
            )

            data = fileobj.read(chunk_size)
            # A short read means we hit EOF: end the stream.
            keep_reading = (len(data) == chunk_size)

            self.conn.send_data(stream_id, data, not keep_reading)
            await self.sock.sendall(self.conn.data_to_send())

            if not keep_reading:
                break

    async def wait_for_flow_control(self, stream_id):
        """
        Blocks until the flow control window for a given stream is opened.
        """
        evt = Event()
        self.flow_control_events[stream_id] = evt
        await evt.wait()

    async def window_updated(self, event):
        """
        Unblock streams waiting on flow control, if needed.
        """
        stream_id = event.stream_id

        if stream_id and stream_id in self.flow_control_events:
            evt = self.flow_control_events.pop(stream_id)
            await evt.set()
        elif not stream_id:
            # stream_id 0 means the connection-level window moved: wake
            # every blocked stream.
            # Need to keep a real list here to use only the events present at
            # this time.
            blocked_streams = list(self.flow_control_events.keys())
            for stream_id in blocked_streams:
                event = self.flow_control_events.pop(stream_id)
                await event.set()
        return
if __name__ == '__main__':
    # argv[1]: directory to serve from; argv[2] (optional): hostname,
    # also used to locate "<host>.crt.pem" / "<host>.key".
    host = sys.argv[2] if len(sys.argv) > 2 else "localhost"
    kernel = Kernel(with_monitor=True)
    print("Try GETting:")
    print(" On OSX after 'brew install curl --with-c-ares --with-libidn --with-nghttp2 --with-openssl':")
    print("/usr/local/opt/curl/bin/curl --tlsv1.2 --http2 -k https://localhost:5000/bundle.js")
    print("Or open a browser to: https://localhost:5000/")
    print(" (Accept all the warnings)")
    kernel.run(h2_server((host, 5000),
                         sys.argv[1],
                         "{}.crt.pem".format(host),
                         "{}.key".format(host)))

Просмотреть файл

@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDhTCCAm2gAwIBAgIJAOrxh0dOYJLdMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA5MTkxNDE2
NDRaFw0xNTEwMTkxNDE2NDRaMFkxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21l
LVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxEjAQBgNV
BAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMqt
A1iu8EN00FU0eBcBGlLVmNEgV7Jkbukra+kwS8j/U2y50QPGJc/FiIVDfuBqk5dL
ACTNc6A/FQcXvWmOc5ixmC3QKKasMpuofqKz0V9C6irZdYXZ9rcsW0gHQIr989yd
R+N1VbIlEVW/T9FJL3B2UD9GVIkUELzm47CSOWZvAxQUlsx8CUNuUCWqyZJoqTFN
j0LeJDOWGCsug1Pkj0Q1x+jMVL6l6Zf6vMkLNOMsOsWsxUk+0L3tl/OzcTgUOCsw
UzY59RIi6Rudrp0oaU8NuHr91yiSqPbKFlX10M9KwEEdnIpcxhND3dacrDycj3ux
eWlqKync2vOFUkhwiaMCAwEAAaNQME4wHQYDVR0OBBYEFA0PN+PGoofZ+QIys2Jy
1Zz94vBOMB8GA1UdIwQYMBaAFA0PN+PGoofZ+QIys2Jy1Zz94vBOMAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEplethBoPpcP3EbR5Rz6snDDIcbtAJu
Ngd0YZppGT+P0DYnPJva4vRG3bb84ZMSuppz5j67qD6DdWte8UXhK8BzWiHzwmQE
QmbKyzzTMKQgTNFntpx5cgsSvTtrHpNYoMHzHOmyAOboNeM0DWiRXsYLkWTitLTN
qbOpstwPubExbT9lPjLclntShT/lCupt+zsbnrR9YiqlYFY/fDzfAybZhrD5GMBY
XdMPItwAc/sWvH31yztarjkLmld76AGCcO5r8cSR/cX98SicyfjOBbSco8GkjYNY
582gTPkKGYpStuN7GNT5tZmxvMq935HRa2XZvlAIe8ufp8EHVoYiF3c=
-----END CERTIFICATE-----

Просмотреть файл

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAyq0DWK7wQ3TQVTR4FwEaUtWY0SBXsmRu6Str6TBLyP9TbLnR
A8Ylz8WIhUN+4GqTl0sAJM1zoD8VBxe9aY5zmLGYLdAopqwym6h+orPRX0LqKtl1
hdn2tyxbSAdAiv3z3J1H43VVsiURVb9P0UkvcHZQP0ZUiRQQvObjsJI5Zm8DFBSW
zHwJQ25QJarJkmipMU2PQt4kM5YYKy6DU+SPRDXH6MxUvqXpl/q8yQs04yw6xazF
ST7Qve2X87NxOBQ4KzBTNjn1EiLpG52unShpTw24ev3XKJKo9soWVfXQz0rAQR2c
ilzGE0Pd1pysPJyPe7F5aWorKdza84VSSHCJowIDAQABAoIBACp+nh4BB/VMz8Wd
q7Q/EfLeQB1Q57JKpoqTBRwueSVai3ZXe4CMEi9/HkG6xiZtkiZ9njkZLq4hq9oB
2z//kzMnwV2RsIRJxI6ohGy+wR51HD4BvEdlTPpY/Yabpqe92VyfSYxidKZWaU0O
QMED1EODOw4ZQ+4928iPrJu//PMB4e7TFao0b9Fk/XLWtu5/tQZz9jsrlTi1zthh
7n+oaGNhfTeIJJL4jrhTrKW1CLHXATtr9SJlfZ3wbMxQVeyj2wUlP1V0M6kBuhNj
tbGbMpixD5iCNJ49Cm2PHg+wBOfS3ADGIpi3PcGw5mb8nB3N9eGBRPhLShAlq5Hi
Lv4tyykCgYEA8u3b3xJ04pxWYN25ou/Sc8xzgDCK4XvDNdHVTuZDjLVA+VTVPzql
lw7VvJArsx47MSPvsaX/+4hQXYtfnR7yJpx6QagvQ+z4ludnIZYrQwdUmb9pFL1s
8UNj+3j9QFRPenIiIQ8qxxNIQ9w2HsVQ8scvc9CjYop/YYAPaQyHaL8CgYEA1ZSz
CR4NcpfgRSILdhb1dLcyw5Qus1VOSAx3DYkhDkMiB8XZwgMdJjwehJo9yaqRCLE8
Sw5znMnkfoZpu7+skrjK0FqmMpXMH9gIszHvFG8wSw/6+2HIWS19/wOu8dh95LuC
0zurMk8rFqxgWMWF20afhgYrUz42cvUTo10FVB0CgYEAt7mW6W3PArfUSCxIwmb4
VmXREKkl0ATHDYQl/Cb//YHzot467TgQll883QB4XF5HzBFurX9rSzO7/BN1e6I0
52i+ubtWC9xD4fUetXMaQvZfUGxIL8xXgVxDWKQXfLiG54c8Mp6C7s6xf8kjEUCP
yR1F0SSA/Pzb+8RbY0p7eocCgYA+1rs+SXtHZev0KyoYGnUpW+Uxqd17ofOgOxqj
/t6c5Z+TjeCdtnDTGQkZlo/rT6XQWuUUaDIXxUbW+xEMzj4mBPyXBLS1WWFvVQ5q
OpzO9E/PJeqAH6rkof/aEelc+oc/zvOU1o9uA+D3kMvgEm1psIOq2RHSMhGvDPA0
NmAk+QKBgQCwd1681GagdIYSZUCBecnLtevXmIsJyDW2yR1NNcIe/ukcVQREMDvy
5DDkhnGDgnV1D5gYcXb34g9vYvbfTnBMl/JXmMAAG1kIS+3pvHyN6f1poVe3yJV1
yHVuvymnJxKnyaV0L3ntepVvV0vVNIkA3oauoUTLto6txBI+b/ImDA==
-----END RSA PRIVATE KEY-----

Просмотреть файл

@ -1,102 +0,0 @@
# -*- coding: utf-8 -*-
"""
eventlet-server.py
~~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server written for Eventlet.
"""
import collections
import json
import eventlet
from eventlet.green.OpenSSL import SSL, crypto
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, DataReceived
class ConnectionManager(object):
    """
    An object that manages a single HTTP/2 connection: it drives the h2
    state machine off a blocking socket and answers every request with a
    JSON echo of its headers.
    """
    def __init__(self, sock):
        config = H2Configuration(client_side=False)
        self.sock = sock
        self.conn = H2Connection(config=config)

    def run_forever(self):
        """Read from the socket, dispatch h2 events, and flush writes."""
        self.conn.initiate_connection()
        self.sock.sendall(self.conn.data_to_send())

        while True:
            data = self.sock.recv(65535)
            if not data:
                break

            events = self.conn.receive_data(data)
            for event in events:
                if isinstance(event, RequestReceived):
                    self.request_received(event.headers, event.stream_id)
                elif isinstance(event, DataReceived):
                    # This demo ignores request bodies: reset the stream.
                    self.conn.reset_stream(event.stream_id)

            self.sock.sendall(self.conn.data_to_send())

    def request_received(self, headers, stream_id):
        """Respond to a request by echoing its headers back as JSON."""
        headers = collections.OrderedDict(headers)
        data = json.dumps({'headers': headers}, indent=4).encode('utf-8')

        response_headers = (
            (':status', '200'),
            ('content-type', 'application/json'),
            # h2 requires header values to be str/bytes; the original
            # passed a bare int here.
            ('content-length', str(len(data))),
            ('server', 'eventlet-h2'),
        )
        self.conn.send_headers(stream_id, response_headers)
        self.conn.send_data(stream_id, data, end_stream=True)
def alpn_callback(conn, protos):
    """Select h2 during ALPN, failing loudly if the client didn't offer it."""
    if b'h2' not in protos:
        raise RuntimeError("No acceptable protocol offered!")
    return b'h2'
def npn_advertise_cb(conn):
    """Advertise h2 as the only protocol we speak during NPN."""
    offered = [b'h2']
    return offered
# Let's set up SSL. This is a lot of work in PyOpenSSL.
options = (
    SSL.OP_NO_COMPRESSION |
    SSL.OP_NO_SSLv2 |
    SSL.OP_NO_SSLv3 |
    SSL.OP_NO_TLSv1 |
    SSL.OP_NO_TLSv1_1
)
context = SSL.Context(SSL.SSLv23_METHOD)
context.set_options(options)
# NOTE(review): VERIFY_NONE plus an always-true callback disables peer
# certificate verification entirely -- acceptable for a demo only.
context.set_verify(SSL.VERIFY_NONE, lambda *args: True)
context.use_privatekey_file('server.key')
context.use_certificate_file('server.crt')
context.set_npn_advertise_callback(npn_advertise_cb)
context.set_alpn_select_callback(alpn_callback)
context.set_cipher_list(
    "ECDHE+AESGCM"
)
context.set_tmp_ecdh(crypto.get_elliptic_curve(u'prime256v1'))

server = eventlet.listen(('0.0.0.0', 443))
server = SSL.Connection(context, server)
pool = eventlet.GreenPool()

while True:
    try:
        new_sock, _ = server.accept()
        manager = ConnectionManager(new_sock)
        # Each connection is handled in its own green thread.
        pool.spawn_n(manager.run_forever)
    except (SystemExit, KeyboardInterrupt):
        break

Просмотреть файл

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
-----END CERTIFICATE-----

Просмотреть файл

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
-----END RSA PRIVATE KEY-----

Просмотреть файл

@ -1,110 +0,0 @@
# -*- coding: utf-8 -*-
"""
Client HTTPS Setup
~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that
negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
this code uses the synchronous, low-level sockets API: however, if you're not
using sockets directly (e.g. because you're using asyncio), you should focus on
the set up required for the SSLContext object. For other concurrency libraries
you may need to use other setup (e.g. for Twisted you'll need to use
IProtocolNegotiationFactory).
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
import ssl
def establish_tcp_connection():
    """
    Open the client-side TCP connection used by this example.

    The details aren't important here: we simply connect to localhost on
    the HTTPS port.
    """
    target = ('localhost', 443)
    return socket.create_connection(target)
def get_http2_ssl_context():
    """
    Build an SSLContext suitably configured for client-side HTTP/2.

    If you're working with Python TLS directly, this is the exact setup
    you want to reproduce.
    """
    # Start from the stdlib's sensible defaults for verifying servers.
    context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)

    # RFC 7540 Section 9.2: HTTP/2 requires TLS 1.2 or newer.
    context.options |= (
        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    )

    # RFC 7540 Section 9.2.1: TLS compression must be disabled.
    context.options |= ssl.OP_NO_COMPRESSION

    # RFC 7540 Section 9.2.2: the cipher blacklist effectively leaves only
    # AES-GCM and ChaCha20 suites with ephemeral key exchange.
    context.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")

    # ALPN is mandatory; NPN may be unavailable on this build, so tolerate
    # its absence. Offering http/1.1 allows graceful fallback.
    context.set_alpn_protocols(["h2", "http/1.1"])
    try:
        context.set_npn_protocols(["h2", "http/1.1"])
    except NotImplementedError:
        pass

    return context
def negotiate_tls(tcp_conn, context):
    """
    Wrap TLS around an established TCP connection and confirm HTTP/2 was
    negotiated, raising RuntimeError if it was not.
    """
    # SNI is mandatory for HTTP/2, so server_hostname must be supplied.
    tls_conn = context.wrap_socket(tcp_conn, server_hostname='localhost')

    # Prefer ALPN's answer, falling back to NPN. Either is only available
    # once the handshake has completed.
    negotiated = tls_conn.selected_alpn_protocol()
    if negotiated is None:
        negotiated = tls_conn.selected_npn_protocol()

    if negotiated != "h2":
        raise RuntimeError("Didn't negotiate HTTP/2!")

    return tls_conn
def main():
    """
    Client HTTPS connection-setup flow. This is a documentation fragment:
    it ends after the handshakes, before any request/response loop.
    """
    # Step 1: Set up your TLS context.
    context = get_http2_ssl_context()

    # Step 2: Create a TCP connection.
    connection = establish_tcp_connection()

    # Step 3: Wrap the connection in TLS and validate that we negotiated HTTP/2
    tls_connection = negotiate_tls(connection, context)

    # Step 4: Create a client-side H2 connection.
    http2_connection = h2.connection.H2Connection()

    # Step 5: Initiate the connection
    http2_connection.initiate_connection()
    tls_connection.sendall(http2_connection.data_to_send())

    # The TCP, TLS, and HTTP/2 handshakes are now complete. You can enter your
    # main loop now.

Просмотреть файл

@ -1,103 +0,0 @@
# -*- coding: utf-8 -*-
"""
Client Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In product code you will want to use an actual
HTTP/1.1 client if possible.
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
def establish_tcp_connection():
    """
    Open the client-side TCP connection for this example.

    The details are unimportant: we connect to localhost on the plaintext
    HTTP port, since this example upgrades from cleartext HTTP/1.1.
    """
    target = ('localhost', 80)
    return socket.create_connection(target)
def send_initial_request(connection, settings):
    """
    For the sake of this upgrade demonstration, we're going to issue a GET
    request against the root of the site. In principle the best request to
    issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
    poorly supported and can break in weird ways.

    :param connection: a connected socket-like object with ``sendall``.
    :param settings: the base64url HTTP2-Settings value (bytes), as
        returned by ``H2Connection.initiate_upgrade_connection``.
    """
    # Craft our initial request per RFC 7540 Section 3.2. This requires two
    # special header fields: the Upgrade header, and the HTTP2-Settings header.
    # The value of the HTTP2-Settings header field comes from h2.
    #
    # Note: every fragment here must be bytes. The original code appended a
    # str "\r\n" to the bytes settings value, which raises TypeError on
    # Python 3.
    request = (
        b"GET / HTTP/1.1\r\n" +
        b"Host: localhost\r\n" +
        b"Upgrade: h2c\r\n" +
        b"HTTP2-Settings: " + settings + b"\r\n" +
        b"\r\n"
    )
    connection.sendall(request)
def get_upgrade_response(connection):
    """
    Read from the socket until the HTTP/1.1 end-of-headers sequence
    (CRLFCRLF) arrives, then check the response status code.

    This is not a substitute for proper HTTP/1.1 parsing, but it's good
    enough for example purposes.
    """
    received = b''
    while b'\r\n\r\n' not in received:
        received += connection.recv(8192)

    header_block, trailing = received.split(b'\r\n\r\n', 1)

    # An upgrade response begins "HTTP/1.1 101 Switching Protocols": check
    # the code. In production code you should also confirm the upgrade is
    # to h2c, but we only offered one protocol, so there's only one
    # possibility.
    status_parts = header_block.split()
    if status_parts[1] != b'101':
        raise RuntimeError("Not upgrading!")

    # The HTTP/1.1 headers are no longer interesting, but any bytes read
    # past them are HTTP/2 data that must be fed to the H2Connection.
    return trailing
def main():
    """
    The client upgrade flow.

    This is a documentation fragment: it ends by handing the first batch
    of events to a (not defined here) ``main_loop``.
    """
    # Step 1: Establish the TCP connection.
    connection = establish_tcp_connection()

    # Step 2: Create H2 Connection object, put it in upgrade mode, and get the
    # value of the HTTP2-Settings header we want to use.
    h2_connection = h2.connection.H2Connection()
    settings_header_value = h2_connection.initiate_upgrade_connection()

    # Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
    send_initial_request(connection, settings_header_value)

    # Step 4: Read the HTTP/1.1 response, look for 101 response.
    extra_data = get_upgrade_response(connection)

    # Step 5: Immediately send the pending HTTP/2 data.
    connection.sendall(h2_connection.data_to_send())

    # Step 6: Feed the body data to the H2 connection. The original code
    # mistakenly called receive_data on the TCP socket; it belongs to the
    # h2 connection object.
    events = h2_connection.receive_data(extra_data)

    # Now you can enter your main loop, beginning by processing the first set
    # of events above. These events may include ResponseReceived, which will
    # contain the response to the request we made in Step 3.
    main_loop(events)

Просмотреть файл

@ -1,112 +0,0 @@
# -*- coding: utf-8 -*-
"""
Server HTTPS Setup
~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 server that
negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
this code uses the synchronous, low-level sockets API: however, if you're not
using sockets directly (e.g. because you're using asyncio), you should focus on
the set up required for the SSLContext object. For other concurrency libraries
you may need to use other setup (e.g. for Twisted you'll need to use
IProtocolNegotiationFactory).
This code requires Python 3.5 or later.
"""
import h2.config
import h2.connection
import socket
import ssl
def establish_tcp_connection():
    """
    This function establishes a server-side TCP connection. How it works isn't
    very important to this example.
    """
    bind_socket = socket.socket()
    bind_socket.bind(('', 443))
    bind_socket.listen(5)
    # Accept a single client and return only its socket.
    return bind_socket.accept()[0]
def get_http2_ssl_context():
    """
    Build an SSLContext suitably configured for server-side HTTP/2.

    If you're working with Python TLS directly, this is the exact setup
    you want to reproduce.
    """
    # Start from the stdlib's defaults for authenticating clients.
    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)

    # RFC 7540 Section 9.2: HTTP/2 requires TLS 1.2 or newer.
    context.options |= (
        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    )

    # RFC 7540 Section 9.2.1: TLS compression must be disabled.
    context.options |= ssl.OP_NO_COMPRESSION

    # RFC 7540 Section 9.2.2: the cipher blacklist effectively leaves only
    # AES-GCM and ChaCha20 suites with ephemeral key exchange.
    context.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")

    # ALPN is mandatory; NPN may be unavailable on this build, so tolerate
    # its absence. Offering http/1.1 allows graceful fallback.
    context.set_alpn_protocols(["h2", "http/1.1"])
    try:
        context.set_npn_protocols(["h2", "http/1.1"])
    except NotImplementedError:
        pass

    return context
def negotiate_tls(tcp_conn, context):
    """
    Wrap server-side TLS around an accepted TCP connection and confirm
    HTTP/2 was negotiated, raising RuntimeError if it was not.
    """
    tls_conn = context.wrap_socket(tcp_conn, server_side=True)

    # Prefer ALPN's answer, falling back to NPN. Either is only available
    # once the handshake has completed.
    negotiated = tls_conn.selected_alpn_protocol()
    if negotiated is None:
        negotiated = tls_conn.selected_npn_protocol()

    if negotiated != "h2":
        raise RuntimeError("Didn't negotiate HTTP/2!")

    return tls_conn
def main():
    """Accept one TCP connection and complete the HTTP/2-over-TLS handshake."""
    # Build the TLS context first so the accepted socket can be wrapped
    # immediately.
    tls_ctx = get_http2_ssl_context()

    # Receive a raw TCP connection from a client.
    raw_sock = establish_tcp_connection()

    # Wrap it in TLS and verify that ALPN/NPN selected "h2".
    secure_sock = negotiate_tls(raw_sock, tls_ctx)

    # Create the server-side h2 state machine and send the connection
    # preamble.
    h2_config = h2.config.H2Configuration(client_side=False)
    h2_conn = h2.connection.H2Connection(config=h2_config)
    h2_conn.initiate_connection()
    secure_sock.sendall(h2_conn.data_to_send())

    # The TCP, TLS, and HTTP/2 handshakes are now complete; the application
    # main loop would start here.

Просмотреть файл

@ -1,100 +0,0 @@
# -*- coding: utf-8 -*-
"""
Server Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 server that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In product code you will want to use an actual
HTTP/1.1 server library if possible.
This code requires Python 3.5 or later.
"""
import h2.config
import h2.connection
import re
import socket
def establish_tcp_connection():
    """
    Accept a single server-side TCP connection on port 443.

    How the listener is configured is incidental to the example; only the
    accepted client socket is returned.
    """
    listener = socket.socket()
    listener.bind(('', 443))
    listener.listen(5)
    client_sock, _address = listener.accept()
    return client_sock
def receive_initial_request(connection):
    """
    Read the initial HTTP/1.1 request from *connection* and validate that it
    asks for an upgrade to HTTP/2 (h2c).

    We're going to assume that the first request has no body. The request
    must carry both an ``Upgrade: h2c`` header field and an
    ``HTTP2-Settings`` header field; a ``RuntimeError`` is raised if either
    is missing.

    In production code, you should use a proper HTTP/1.1 parser and actually
    serve HTTP/1.1 requests!

    :param connection: a socket-like object supporting ``recv``.
    :returns: the value of the HTTP2-Settings header field, as bytes.
    :raises RuntimeError: if either required upgrade header is absent.
    """
    data = b''

    # Read until the blank line that terminates the header block.
    while not data.endswith(b'\r\n\r\n'):
        data += connection.recv(8192)

    # BUG FIX: both checks below were inverted ("is not None") in the
    # original, which raised exactly when the headers WERE present and then
    # crashed with AttributeError (None.group) when they were absent.
    match = re.search(b'Upgrade: h2c\r\n', data)
    if match is None:
        raise RuntimeError("HTTP/2 upgrade not requested!")

    # We need to look for the HTTP2-Settings header field. Again, in
    # production code you shouldn't use regular expressions for this, but
    # it's good enough for the example.
    match = re.search(b'HTTP2-Settings: (\\S+)\r\n', data)
    if match is None:
        raise RuntimeError("HTTP2-Settings header field not present!")

    return match.group(1)
def send_upgrade_response(connection):
    """
    Write the HTTP/1.1 ``101 Switching Protocols`` response that accepts
    the client's h2c upgrade offer.
    """
    # The response is a fixed byte string: status line, the Upgrade echo,
    # and the terminating blank line.
    connection.sendall(
        b"HTTP/1.1 101 Switching Protocols\r\n"
        b"Upgrade: h2c\r\n"
        b"\r\n"
    )
def main():
    """
    The server upgrade flow.
    """
    # Step 1: Establish the TCP connection.
    connection = establish_tcp_connection()

    # Step 2: Read the response. We expect this to request an upgrade.
    settings_header_value = receive_initial_request(connection)

    # Step 3: Create a H2Connection object in server mode, and pass it the
    # value of the HTTP2-Settings header field.
    config = h2.config.H2Configuration(client_side=False)
    h2_connection = h2.connection.H2Connection(config=config)
    h2_connection.initiate_upgrade_connection(
        settings_header=settings_header_value
    )

    # Step 4: Send the 101 Switching Protocols response.
    send_upgrade_response(connection)

    # Step 5: Send pending HTTP/2 data.
    connection.sendall(h2_connection.data_to_send())

    # At this point, you can enter your main loop. The first step has to be to
    # send the response to the initial HTTP/1.1 request you received on stream
    # 1.
    # NOTE(review): main_loop() is not defined anywhere in this example file;
    # it is a placeholder for the application's own serving loop.
    main_loop()

Просмотреть файл

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
-----END CERTIFICATE-----

Просмотреть файл

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
-----END RSA PRIVATE KEY-----

Просмотреть файл

@ -1,92 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tornado-server.py
~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server written for Tornado.
"""
import collections
import json
import ssl
import tornado.gen
import tornado.ioloop
import tornado.iostream
import tornado.tcpserver
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, DataReceived
def create_ssl_context(certfile, keyfile):
    """
    Build a server-side TLS context suitable for HTTP/2: TLS 1.2+, no
    TLS-level compression, ECDHE+AESGCM ciphers, and "h2" advertised via
    ALPN, using the given certificate/key pair.
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    # Disable legacy TLS versions and compression, one option at a time.
    context.options |= ssl.OP_NO_TLSv1
    context.options |= ssl.OP_NO_TLSv1_1
    context.options |= ssl.OP_NO_COMPRESSION
    context.set_ciphers("ECDHE+AESGCM")
    context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    context.set_alpn_protocols(["h2"])
    return context
class H2Server(tornado.tcpserver.TCPServer):
    """TCP server that hands each accepted stream to an HTTP/2 handler."""

    @tornado.gen.coroutine
    def handle_stream(self, stream, address):
        # Delegate the whole connection lifetime to the handler coroutine.
        yield EchoHeadersHandler(stream).handle()
class EchoHeadersHandler(object):
    """Per-connection handler that echoes request headers back as JSON."""

    def __init__(self, stream):
        # The tornado IOStream for this TCP connection.
        self.stream = stream
        # Server-side h2 state machine for this connection.
        config = H2Configuration(client_side=False)
        self.conn = H2Connection(config=config)

    @tornado.gen.coroutine
    def handle(self):
        """Run the connection: send the preamble, then pump h2 events."""
        self.conn.initiate_connection()
        yield self.stream.write(self.conn.data_to_send())

        while True:
            try:
                # partial=True: return whatever bytes are available rather
                # than waiting for exactly 65535.
                data = yield self.stream.read_bytes(65535, partial=True)
                if not data:
                    break

                events = self.conn.receive_data(data)
                for event in events:
                    if isinstance(event, RequestReceived):
                        self.request_received(event.headers, event.stream_id)
                    elif isinstance(event, DataReceived):
                        # This server ignores request bodies: reset any
                        # stream that sends one.
                        self.conn.reset_stream(event.stream_id)

                yield self.stream.write(self.conn.data_to_send())
            except tornado.iostream.StreamClosedError:
                break

    def request_received(self, headers, stream_id):
        """Respond to a request with a JSON dump of its header fields."""
        headers = collections.OrderedDict(headers)
        data = json.dumps({'headers': headers}, indent=4).encode('utf-8')

        response_headers = (
            (':status', '200'),
            ('content-type', 'application/json'),
            ('content-length', str(len(data))),
            ('server', 'tornado-h2'),
        )
        self.conn.send_headers(stream_id, response_headers)
        self.conn.send_data(stream_id, data, end_stream=True)
if __name__ == '__main__':
    # Serve HTTP/2 over TLS on port 8888, using the certificate pair from
    # the working directory.
    ssl_context = create_ssl_context('server.crt', 'server.key')
    server = H2Server(ssl_options=ssl_context)
    server.listen(8888)
    io_loop = tornado.ioloop.IOLoop.current()
    io_loop.start()

Просмотреть файл

@ -1,111 +0,0 @@
# -*- coding: utf-8 -*-
"""
head_request.py
~~~~~~~~~~~~~~~
A short example that demonstrates a client that makes HEAD requests to certain
websites.
This example is intended as a reproduction of nghttp2 issue 396, for the
purposes of compatibility testing.
"""
from __future__ import print_function
from twisted.internet import reactor
from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
from twisted.internet.protocol import Protocol
from twisted.internet.ssl import optionsForClientTLS
from hyperframe.frame import SettingsFrame
from h2.connection import H2Connection
from h2.events import (
ResponseReceived, DataReceived, StreamEnded,
StreamReset, SettingsAcknowledged,
)
# Host to contact and the resource to request.
AUTHORITY = u'http2bin.org'
PATH = '/'
# Header table size (in octets) advertised in our SETTINGS frame; changing
# it is what reproduces nghttp2 issue 396 (see module docstring).
SIZE = 4096
class H2Protocol(Protocol):
    """
    Twisted protocol that issues a single HEAD request over HTTP/2, prints
    the response, and stops the reactor when the stream ends.
    """

    def __init__(self):
        # Client-side h2 state machine for this connection.
        self.conn = H2Connection()
        # Negotiated ALPN protocol, captured on first dataReceived call;
        # doubles as a "handshake already inspected" flag.
        self.known_proto = None
        # Guards against sending the request more than once.
        self.request_made = False

    def connectionMade(self):
        """Send the HTTP/2 preamble as soon as the transport is up."""
        self.conn.initiate_connection()

        # This reproduces the error in #396, by changing the header table size.
        self.conn.update_settings({SettingsFrame.HEADER_TABLE_SIZE: SIZE})

        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        """Feed received bytes to h2 and dispatch the resulting events."""
        if not self.known_proto:
            self.known_proto = self.transport.negotiatedProtocol
            # The endpoint below requires ALPN "h2", so anything else is a
            # programming error.
            assert self.known_proto == b'h2'

        events = self.conn.receive_data(data)

        for event in events:
            if isinstance(event, ResponseReceived):
                self.handleResponse(event.headers, event.stream_id)
            elif isinstance(event, DataReceived):
                self.handleData(event.data, event.stream_id)
            elif isinstance(event, StreamEnded):
                self.endStream(event.stream_id)
            elif isinstance(event, SettingsAcknowledged):
                self.settingsAcked(event)
            elif isinstance(event, StreamReset):
                reactor.stop()
                raise RuntimeError("Stream reset: %d" % event.error_code)
            else:
                # Unhandled event types are merely printed for inspection.
                print(event)

        data = self.conn.data_to_send()
        if data:
            self.transport.write(data)

    def settingsAcked(self, event):
        # Having received the remote settings change, lets send our request.
        if not self.request_made:
            self.sendRequest()

    def handleResponse(self, response_headers, stream_id):
        """Print the response headers, one per line."""
        for name, value in response_headers:
            print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))

        print("")

    def handleData(self, data, stream_id):
        """Print any body data (a HEAD response normally carries none)."""
        print(data, end='')

    def endStream(self, stream_id):
        """On stream end, close the connection cleanly and stop the reactor."""
        self.conn.close_connection()
        self.transport.write(self.conn.data_to_send())
        self.transport.loseConnection()
        reactor.stop()

    def sendRequest(self):
        """Send the HEAD request on stream 1 with no body."""
        request_headers = [
            (':method', 'HEAD'),
            (':authority', AUTHORITY),
            (':scheme', 'https'),
            (':path', PATH),
            ('user-agent', 'hyper-h2/1.0.0'),
        ]
        self.conn.send_headers(1, request_headers, end_stream=True)
        self.request_made = True
# Connect to AUTHORITY over TLS, requiring ALPN to negotiate "h2", and run
# the reactor until the response completes (endStream stops it).
options = optionsForClientTLS(
    hostname=AUTHORITY,
    acceptableProtocols=[b'h2'],
)

connectProtocol(
    SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
    H2Protocol()
)
reactor.run()

Просмотреть файл

@ -1,249 +0,0 @@
# -*- coding: utf-8 -*-
"""
post_request.py
~~~~~~~~~~~~~~~
A short example that demonstrates a client that makes POST requests to certain
websites.
This example is intended to demonstrate how to handle uploading request bodies.
In this instance, a file will be uploaded. In order to handle arbitrary files,
this example also demonstrates how to obey HTTP/2 flow control rules.
Takes one command-line argument: a path to a file in the filesystem to upload.
If none is present, uploads this file.
"""
from __future__ import print_function
import mimetypes
import os
import sys
from twisted.internet import reactor, defer
from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
from twisted.internet.protocol import Protocol
from twisted.internet.ssl import optionsForClientTLS
from h2.connection import H2Connection
from h2.events import (
ResponseReceived, DataReceived, StreamEnded, StreamReset, WindowUpdated,
SettingsAcknowledged,
)
# Host to contact and the resource to POST the upload to.
AUTHORITY = u'http2bin.org'
PATH = '/post'
class H2Protocol(Protocol):
    """
    Twisted protocol that uploads one file via a single HTTP/2 POST request,
    obeying flow control, then stops the reactor.
    """

    def __init__(self, file_path):
        # Client-side h2 state machine for this connection.
        self.conn = H2Connection()
        # Negotiated ALPN protocol, captured on first dataReceived call.
        self.known_proto = None
        # Request lifecycle flags.
        self.request_made = False
        self.request_complete = False
        # Path of the file to upload; handle and remaining byte count are
        # populated by sendRequest.
        self.file_path = file_path
        # Deferred that fires when the flow control window reopens; None
        # while we are not blocked.
        self.flow_control_deferred = None
        self.fileobj = None
        self.file_size = None

    def connectionMade(self):
        """
        Called by Twisted when the TCP connection is established. We can start
        sending some data now: we should open with the connection preamble.
        """
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        """
        Called by Twisted when data is received on the connection.

        We need to check a few things here. Firstly, we want to validate that
        we actually negotiated HTTP/2: if we didn't, we shouldn't proceed!

        Then, we want to pass the data to the protocol stack and check what
        events occurred.
        """
        if not self.known_proto:
            self.known_proto = self.transport.negotiatedProtocol
            assert self.known_proto == b'h2'

        events = self.conn.receive_data(data)

        for event in events:
            if isinstance(event, ResponseReceived):
                self.handleResponse(event.headers)
            elif isinstance(event, DataReceived):
                self.handleData(event.data)
            elif isinstance(event, StreamEnded):
                self.endStream()
            elif isinstance(event, SettingsAcknowledged):
                self.settingsAcked(event)
            elif isinstance(event, StreamReset):
                reactor.stop()
                raise RuntimeError("Stream reset: %d" % event.error_code)
            elif isinstance(event, WindowUpdated):
                self.windowUpdated(event)

        data = self.conn.data_to_send()
        if data:
            self.transport.write(data)

    def settingsAcked(self, event):
        """
        Called when the remote party ACKs our settings. We send a SETTINGS
        frame as part of the preamble, so if we want to be very polite we can
        wait until the ACK for that frame comes before we start sending our
        request.
        """
        if not self.request_made:
            self.sendRequest()

    def handleResponse(self, response_headers):
        """
        Handle the response by printing the response headers.
        """
        for name, value in response_headers:
            print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))

        print("")

    def handleData(self, data):
        """
        We handle data that's received by just printing it.
        """
        print(data, end='')

    def endStream(self):
        """
        We call this when the stream is cleanly ended by the remote peer. That
        means that the response is complete.

        Because this code only makes a single HTTP/2 request, once we receive
        the complete response we can safely tear the connection down and stop
        the reactor. We do that as cleanly as possible.
        """
        self.request_complete = True
        self.conn.close_connection()
        self.transport.write(self.conn.data_to_send())
        self.transport.loseConnection()

    def windowUpdated(self, event):
        """
        We call this when the flow control window for the connection or the
        stream has been widened. If there's a flow control deferred present
        (that is, if we're blocked behind the flow control), we fire it.
        Otherwise, we do nothing.
        """
        if self.flow_control_deferred is None:
            return

        # Make sure we remove the flow control deferred to avoid firing it
        # more than once.
        flow_control_deferred = self.flow_control_deferred
        self.flow_control_deferred = None
        flow_control_deferred.callback(None)

    def connectionLost(self, reason=None):
        """
        Called by Twisted when the connection is gone. Regardless of whether
        it was clean or not, we want to stop the reactor.
        """
        if self.fileobj is not None:
            self.fileobj.close()

        if reactor.running:
            reactor.stop()

    def sendRequest(self):
        """
        Send the POST request.

        A POST request is made up of one headers frame, and then 0+ data
        frames. This method begins by sending the headers, and then starts a
        series of calls to send data.
        """
        # First, we need to work out how large the file is.
        self.file_size = os.stat(self.file_path).st_size

        # Next, we want to guess a content-type and content-encoding.
        content_type, content_encoding = mimetypes.guess_type(self.file_path)

        # Now we can build a header block.
        request_headers = [
            (':method', 'POST'),
            (':authority', AUTHORITY),
            (':scheme', 'https'),
            (':path', PATH),
            ('user-agent', 'hyper-h2/1.0.0'),
            ('content-length', str(self.file_size)),
        ]

        if content_type is not None:
            request_headers.append(('content-type', content_type))

            if content_encoding is not None:
                request_headers.append(('content-encoding', content_encoding))

        self.conn.send_headers(1, request_headers)
        self.request_made = True

        # We can now open the file.
        self.fileobj = open(self.file_path, 'rb')

        # We now need to send all the relevant data. We do this by checking
        # what the acceptable amount of data is to send, and sending it. If we
        # find ourselves blocked behind flow control, we then place a deferred
        # and wait until that deferred fires.
        self.sendFileData()

    def sendFileData(self):
        """
        Send some file data on the connection.
        """
        # Firstly, check what the flow control window is for stream 1.
        window_size = self.conn.local_flow_control_window(stream_id=1)

        # Next, check what the maximum frame size is.
        max_frame_size = self.conn.max_outbound_frame_size

        # We will send no more than the window size or the remaining file size
        # of data in this call, whichever is smaller.
        bytes_to_send = min(window_size, self.file_size)

        # We now need to send a number of data frames.
        while bytes_to_send > 0:
            chunk_size = min(bytes_to_send, max_frame_size)
            data_chunk = self.fileobj.read(chunk_size)
            self.conn.send_data(stream_id=1, data=data_chunk)

            bytes_to_send -= chunk_size
            self.file_size -= chunk_size

        # We've prepared a whole chunk of data to send. If the file is fully
        # sent, we also want to end the stream: we're done here.
        if self.file_size == 0:
            self.conn.end_stream(stream_id=1)
        else:
            # We've still got data left to send but the window is closed. Save
            # a Deferred that will call us when the window gets opened.
            # NOTE(review): Deferred callbacks are invoked with the callback
            # result as a positional argument, but sendFileData() accepts
            # none; if this path ever fires it looks like it would raise
            # TypeError. Probably needs
            # ``addCallback(lambda _: self.sendFileData())`` — confirm
            # against the Twisted Deferred documentation.
            self.flow_control_deferred = defer.Deferred()
            self.flow_control_deferred.addCallback(self.sendFileData)

        self.transport.write(self.conn.data_to_send())
# Pick the file to upload: the first command-line argument, falling back to
# this script itself.
try:
    filename = sys.argv[1]
except IndexError:
    filename = __file__

# TLS options requiring ALPN to negotiate "h2".
options = optionsForClientTLS(
    hostname=AUTHORITY,
    acceptableProtocols=[b'h2'],
)

connectProtocol(
    SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
    H2Protocol(filename)
)
reactor.run()

Просмотреть файл

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
-----END CERTIFICATE-----

Просмотреть файл

@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE REQUEST-----
MIICsDCCAZgCAQAwazELMAkGA1UEBhMCR0IxDzANBgNVBAgTBkxvbmRvbjEPMA0G
A1UEBxMGTG9uZG9uMREwDwYDVQQKEwhoeXBlci1oMjERMA8GA1UECxMIaHlwZXkt
aDIxFDASBgNVBAMTC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+nJVN
wUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7UrAozj
5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFxV4Ho
MU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyglqxa
ylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZirdN
IydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABoAAwDQYJKoZIhvcNAQEFBQAD
ggEBACZpSoZWxHU5uagpM2Vinh2E7CXiMAlBc6NXhQMD/3fycr9sX4d/+y9Gy3bL
OfEOHBPlQVGrt05aiTh7m5s3HQfsH8l3RfKpfzCfoqd2ESVwgB092bJwY9fBnkw/
UzIHvSnlaKc78h+POUoATOb4faQ8P04wzJHzckbCDI8zRzBZTMVGuiWUopq7K5Ce
VSesbqHHnW9ob/apigKNE0k7et/28NOXNEP90tTsz98yN3TP+Nv9puwvT9JZOOoG
0PZIQKJIaZ1NZoNQHLN9gXz012XWa99cBE0qNiBUugXlNhXjkIIM8FIhDQOREB18
0KDxEma+A0quyjnDMwPSoZsMca4=
-----END CERTIFICATE REQUEST-----

Просмотреть файл

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
-----END RSA PRIVATE KEY-----

Просмотреть файл

@ -1,182 +0,0 @@
# -*- coding: utf-8 -*-
"""
twisted-server.py
~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server written for Twisted.
"""
import functools
import mimetypes
import os
import os.path
import sys
from OpenSSL import crypto
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import endpoints, reactor, ssl
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import (
RequestReceived, DataReceived, WindowUpdated
)
def close_file(file, d):
    """Errback helper: close *file*, ignoring the Deferred result *d*."""
    file.close()


# Number of bytes read from disk per chunk when streaming a file.
READ_CHUNK_SIZE = 8192
class H2Protocol(Protocol):
    """
    Twisted protocol implementing a minimal HTTP/2 static file server: GET
    requests are answered with the matching file under ``root``, obeying
    per-stream flow control.
    """

    def __init__(self, root):
        # Server-side h2 state machine for this connection.
        config = H2Configuration(client_side=False)
        self.conn = H2Connection(config=config)
        # Flipped to True after the first dataReceived call.
        self.known_proto = None
        # Document root that request paths are resolved against.
        self.root = root
        # stream_id -> Deferred fired when that stream's window reopens.
        self._flow_control_deferreds = {}

    def connectionMade(self):
        # Send the HTTP/2 connection preamble immediately.
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        if not self.known_proto:
            self.known_proto = True

        events = self.conn.receive_data(data)
        # NOTE(review): ``self.conn.data_to_send`` here is a bound method
        # reference (no call parentheses), so the condition is always truthy.
        # Harmless — data_to_send() below returns b'' when nothing is
        # pending — but presumably ``self.conn.data_to_send()`` was meant.
        if self.conn.data_to_send:
            self.transport.write(self.conn.data_to_send())

        for event in events:
            if isinstance(event, RequestReceived):
                self.requestReceived(event.headers, event.stream_id)
            elif isinstance(event, DataReceived):
                # Request bodies are not supported: reset such streams.
                self.dataFrameReceived(event.stream_id)
            elif isinstance(event, WindowUpdated):
                self.windowUpdated(event)

    def requestReceived(self, headers, stream_id):
        # NOTE(review): converting to dict discards duplicate header names
        # (the original comment read "Invalid conversion, fix later") —
        # acceptable for this example only.
        headers = dict(headers)
        assert headers[b':method'] == b'GET'

        path = headers[b':path'].lstrip(b'/')
        full_path = os.path.join(self.root, path)

        if not os.path.exists(full_path):
            # Unknown path: answer 404 with an empty body.
            response_headers = (
                (':status', '404'),
                ('content-length', '0'),
                ('server', 'twisted-h2'),
            )
            self.conn.send_headers(
                stream_id, response_headers, end_stream=True
            )
            self.transport.write(self.conn.data_to_send())
        else:
            self.sendFile(full_path, stream_id)

        return

    def dataFrameReceived(self, stream_id):
        # Reject the body by resetting the stream.
        self.conn.reset_stream(stream_id)
        self.transport.write(self.conn.data_to_send())

    def sendFile(self, file_path, stream_id):
        # Send the response headers, then stream the file contents.
        filesize = os.stat(file_path).st_size
        content_type, content_encoding = mimetypes.guess_type(file_path)
        response_headers = [
            (':status', '200'),
            ('content-length', str(filesize)),
            ('server', 'twisted-h2'),
        ]
        if content_type:
            response_headers.append(('content-type', content_type))
        if content_encoding:
            response_headers.append(('content-encoding', content_encoding))

        self.conn.send_headers(stream_id, response_headers)
        self.transport.write(self.conn.data_to_send())

        f = open(file_path, 'rb')
        d = self._send_file(f, stream_id)
        # Ensure the file handle is closed if sending fails.
        d.addErrback(functools.partial(close_file, f))

    def windowUpdated(self, event):
        """
        Handle a WindowUpdated event by firing any waiting data sending
        callbacks.
        """
        stream_id = event.stream_id

        if stream_id and stream_id in self._flow_control_deferreds:
            # Window opened on one specific stream.
            d = self._flow_control_deferreds.pop(stream_id)
            d.callback(event.delta)
        elif not stream_id:
            # Connection-level update: wake every blocked stream.
            for d in self._flow_control_deferreds.values():
                d.callback(event.delta)

            self._flow_control_deferreds = {}

        return

    @inlineCallbacks
    def _send_file(self, file, stream_id):
        """
        This callback sends more data for a given file on the stream.
        """
        keep_reading = True
        while keep_reading:
            # Block (via Deferred) until the peer grants us window.
            while not self.conn.remote_flow_control_window(stream_id):
                yield self.wait_for_flow_control(stream_id)

            chunk_size = min(
                self.conn.remote_flow_control_window(stream_id), READ_CHUNK_SIZE
            )
            data = file.read(chunk_size)
            # A short read means EOF; the final frame carries END_STREAM.
            keep_reading = len(data) == chunk_size
            self.conn.send_data(stream_id, data, not keep_reading)
            self.transport.write(self.conn.data_to_send())

            if not keep_reading:
                break

        file.close()

    def wait_for_flow_control(self, stream_id):
        """
        Returns a Deferred that fires when the flow control window is opened.
        """
        d = Deferred()
        self._flow_control_deferreds[stream_id] = d
        return d
class H2Factory(Factory):
    """
    Twisted factory producing one ``H2Protocol`` per connection, each
    serving files from the same document root.
    """

    def __init__(self, root):
        # Directory that requested paths are resolved against.
        self.root = root

    def buildProtocol(self, addr):
        protocol = H2Protocol(self.root)
        return protocol
# Document root comes from the command line.
root = sys.argv[1]

# Load the PEM certificate and key from the working directory.
with open('server.crt', 'r') as f:
    cert_data = f.read()
with open('server.key', 'r') as f:
    key_data = f.read()

cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_data)
# Advertise only "h2" via ALPN; serve on port 8080.
options = ssl.CertificateOptions(
    privateKey=key,
    certificate=cert,
    acceptableProtocols=[b'h2'],
)

endpoint = endpoints.SSL4ServerEndpoint(reactor, 8080, options, backlog=128)
endpoint.listen(H2Factory(root))
reactor.run()

Просмотреть файл

@ -1,8 +0,0 @@
# -*- coding: utf-8 -*-
"""
h2
~~

An HTTP/2 implementation.
"""
#: The hyper-h2 package version string.
__version__ = '3.0.1'

Просмотреть файл

@ -1,164 +0,0 @@
# -*- coding: utf-8 -*-
"""
h2/config
~~~~~~~~~
Objects for controlling the configuration of the HTTP/2 stack.
"""
class _BooleanConfigOption(object):
"""
Descriptor for handling a boolean config option. This will block
attempts to set boolean config options to non-bools.
"""
def __init__(self, name):
self.name = name
self.attr_name = '_%s' % self.name
def __get__(self, instance, owner):
return getattr(instance, self.attr_name)
def __set__(self, instance, value):
if not isinstance(value, bool):
raise ValueError("%s must be a bool" % self.name)
setattr(instance, self.attr_name, value)
class DummyLogger(object):
    """
    A no-op stand-in for a ``logging.Logger``.

    Supplied when the caller passes no logger, so the rest of the code can
    call logging methods unconditionally instead of sprinkling ``if
    logger:`` conditionals everywhere.
    """
    def __init__(self, *vargs):
        """Accept and discard any constructor arguments."""

    def debug(self, *vargs, **kwargs):
        """Swallow the message; ``debug`` is the only level needed for now."""
class H2Configuration(object):
"""
An object that controls the way a single HTTP/2 connection behaves.
This object allows the users to customize behaviour. In particular, it
allows users to enable or disable optional features, or to otherwise handle
various unusual behaviours.
This object has very little behaviour of its own: it mostly just ensures
that configuration is self-consistent.
:param client_side: Whether this object is to be used on the client side of
a connection, or on the server side. Affects the logic used by the
state machine, the default settings values, the allowable stream IDs,
and several other properties. Defaults to ``True``.
:type client_side: ``bool``
:param header_encoding: Controls whether the headers emitted by this object
in events are transparently decoded to ``unicode`` strings, and what
encoding is used to do that decoding. This defaults to ``None``,
meaning that headers will be returned as bytes. To automatically
decode headers (that is, to return them as unicode strings), this can
be set to the string name of any encoding, e.g. ``'utf-8'``.
.. versionchanged:: 3.0.0
Changed default value from ``'utf-8'`` to ``None``
:type header_encoding: ``str``, ``False``, or ``None``
:param validate_outbound_headers: Controls whether the headers emitted
by this object are validated against the rules in RFC 7540.
Disabling this setting will cause outbound header validation to
be skipped, and allow the object to emit headers that may be illegal
according to RFC 7540. Defaults to ``True``.
:type validate_outbound_headers: ``bool``
:param normalize_outbound_headers: Controls whether the headers emitted
by this object are normalized before sending. Disabling this setting
will cause outbound header normalization to be skipped, and allow
the object to emit headers that may be illegal according to
RFC 7540. Defaults to ``True``.
:type normalize_outbound_headers: ``bool``
:param validate_inbound_headers: Controls whether the headers received
by this object are validated against the rules in RFC 7540.
Disabling this setting will cause inbound header validation to
be skipped, and allow the object to receive headers that may be illegal
according to RFC 7540. Defaults to ``True``.
:type validate_inbound_headers: ``bool``
:param normalize_inbound_headers: Controls whether the headers received by
this object are normalized according to the rules of RFC 7540.
Disabling this setting may lead to hyper-h2 emitting header blocks that
some RFCs forbid, e.g. with multiple cookie fields.
.. versionadded:: 3.0.0
:type normalize_inbound_headers: ``bool``
:param logger: A logger that conforms to the requirements for this module,
those being no I/O and no context switches, which is needed in order
to run in asynchronous operation.
.. versionadded:: 2.6.0
:type logger: ``logging.Logger``
"""
client_side = _BooleanConfigOption('client_side')
validate_outbound_headers = _BooleanConfigOption(
'validate_outbound_headers'
)
normalize_outbound_headers = _BooleanConfigOption(
'normalize_outbound_headers'
)
validate_inbound_headers = _BooleanConfigOption(
'validate_inbound_headers'
)
normalize_inbound_headers = _BooleanConfigOption(
'normalize_inbound_headers'
)
def __init__(self,
client_side=True,
header_encoding=None,
validate_outbound_headers=True,
normalize_outbound_headers=True,
validate_inbound_headers=True,
normalize_inbound_headers=True,
logger=None):
self.client_side = client_side
self.header_encoding = header_encoding
self.validate_outbound_headers = validate_outbound_headers
self.normalize_outbound_headers = normalize_outbound_headers
self.validate_inbound_headers = validate_inbound_headers
self.normalize_inbound_headers = normalize_inbound_headers
self.logger = logger or DummyLogger(__name__)
@property
def header_encoding(self):
    """
    Controls whether the headers emitted by this object in events are
    transparently decoded to ``unicode`` strings, and what encoding is used
    to do that decoding. This defaults to ``None``, meaning that headers
    will be returned as bytes. To automatically decode headers (that is, to
    return them as unicode strings), this can be set to the string name of
    any encoding, e.g. ``'utf-8'``.

    Assignments are validated by the corresponding setter; the value is
    stored in ``self._header_encoding``.
    """
    return self._header_encoding
@header_encoding.setter
def header_encoding(self, value):
    """
    Validate *value* before storing it: it must be a string naming an
    encoding, ``False``/``None`` (no decoding), and never ``True``,
    which names no encoding at all.
    """
    # True passes an isinstance bool check but is meaningless here, so
    # reject it explicitly first.
    if value is True:
        raise ValueError("header_encoding cannot be True")
    if not isinstance(value, (bool, str, type(None))):
        raise ValueError("header_encoding must be bool, string, or None")
    self._header_encoding = value

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-
"""
h2/errors
~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The current registry is available at:
https://tools.ietf.org/html/rfc7540#section-11.4
"""
import enum
class ErrorCodes(enum.IntEnum):
    """
    The complete set of HTTP/2 error codes established by RFC 7540,
    exposed as an ``IntEnum`` so members compare equal to plain integers.

    .. versionadded:: 2.5.0
    """
    #: The connection is being shut down gracefully.
    NO_ERROR = 0x0
    #: A protocol error was detected.
    PROTOCOL_ERROR = 0x1
    #: The implementation encountered an internal fault.
    INTERNAL_ERROR = 0x2
    #: The peer exceeded the flow-control limits.
    FLOW_CONTROL_ERROR = 0x3
    #: A settings frame was not acknowledged in time.
    SETTINGS_TIMEOUT = 0x4
    #: A frame arrived for a stream that is already closed.
    STREAM_CLOSED = 0x5
    #: A frame had an incorrect size.
    FRAME_SIZE_ERROR = 0x6
    #: The stream was refused before any processing.
    REFUSED_STREAM = 0x7
    #: The stream was cancelled.
    CANCEL = 0x8
    #: The compression state was not updated.
    COMPRESSION_ERROR = 0x9
    #: The TCP connection for a CONNECT request failed.
    CONNECT_ERROR = 0xa
    #: The peer's processing capacity was exceeded.
    ENHANCE_YOUR_CALM = 0xb
    #: The negotiated TLS parameters are not acceptable.
    INADEQUATE_SECURITY = 0xc
    #: The request should be retried over HTTP/1.1.
    HTTP_1_1_REQUIRED = 0xd
def _error_code_from_int(code):
    """
    Map an integer error code to the matching :class:`ErrorCodes
    <h2.errors.ErrorCodes>` member; codes outside the known set are
    passed through unchanged as plain integers.
    """
    try:
        code = ErrorCodes(code)
    except ValueError:
        # Unknown code: leave the integer as-is.
        pass
    return code
__all__ = ['ErrorCodes']

Просмотреть файл

@ -1,619 +0,0 @@
# -*- coding: utf-8 -*-
"""
h2/events
~~~~~~~~~
Defines Event types for HTTP/2.
Events are returned by the H2 state machine to allow implementations to keep
track of events triggered by receiving data. Each time data is provided to the
H2 state machine it processes the data and returns a list of Event objects.
"""
import binascii
from .settings import ChangedSetting, _setting_code_from_int
class Event(object):
    """
    Common base class for every event emitted by the h2 state machine.
    Carries no state of its own.
    """
class RequestReceived(Event):
    """
    Fired whenever a complete set of request headers is received. Carries
    the ID of the new stream and the HTTP header block of the request.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """
    def __init__(self):
        #: ID of the stream this request was made on.
        self.stream_id = None

        #: The request's header block.
        self.headers = None

        #: Holds the associated :class:`StreamEnded <h2.events.StreamEnded>`
        #: event when this request also ended the stream, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.stream_ended = None

        #: Holds the associated :class:`PriorityUpdated
        #: <h2.events.PriorityUpdated>` event when this request carried
        #: priority information, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.priority_updated = None

    def __repr__(self):
        template = "<RequestReceived stream_id:%s, headers:%s>"
        return template % (self.stream_id, self.headers)
class ResponseReceived(Event):
    """
    Fired whenever a complete set of response headers is received. Carries
    the ID of the stream and the HTTP header block of the response.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """
    def __init__(self):
        #: ID of the stream this response was made on.
        self.stream_id = None

        #: The response's header block.
        self.headers = None

        #: Holds the associated :class:`StreamEnded <h2.events.StreamEnded>`
        #: event when this response also ended the stream, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.stream_ended = None

        #: Holds the associated :class:`PriorityUpdated
        #: <h2.events.PriorityUpdated>` event when this response carried
        #: priority information, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.priority_updated = None

    def __repr__(self):
        template = "<ResponseReceived stream_id:%s, headers:%s>"
        return template % (self.stream_id, self.headers)
class TrailersReceived(Event):
    """
    Fired whenever trailers arrive on a stream. Trailers are a header
    block sent after the request/response body, used for information that
    was not known ahead of time (e.g. content-length). Carries the
    trailing header fields and the ID of the stream they arrived on.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """
    def __init__(self):
        #: ID of the stream these trailers were received on.
        self.stream_id = None

        #: The trailing header block itself.
        self.headers = None

        #: Trailers always end streams, so the associated
        #: :class:`StreamEnded <h2.events.StreamEnded>` event is always
        #: present here.
        #:
        #: .. versionadded:: 2.4.0
        self.stream_ended = None

        #: Holds the associated :class:`PriorityUpdated
        #: <h2.events.PriorityUpdated>` event when the trailers also set
        #: priority information, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.priority_updated = None

    def __repr__(self):
        template = "<TrailersReceived stream_id:%s, headers:%s>"
        return template % (self.stream_id, self.headers)
class _HeadersSent(Event):
    """
    Internal-only event fired whenever any header block is sent. Used to
    choose the validation steps applied to outgoing header blocks; never
    surfaced to users.
    """
class _ResponseSent(_HeadersSent):
    """
    Internal-only event fired whenever response headers are sent on a
    stream. Used to choose the validation steps applied to outgoing
    header blocks; never surfaced to users.
    """
class _RequestSent(_HeadersSent):
    """
    Internal-only event fired whenever request headers are sent on a
    stream. Used to choose the validation steps applied to outgoing
    header blocks; never surfaced to users.
    """
class _TrailersSent(_HeadersSent):
    """
    Internal-only event fired whenever trailers are sent on a stream.
    Trailers are a header block sent after the request/response body,
    carrying information that was not known ahead of time (e.g.
    content-length). Used to choose the validation steps applied to
    outgoing header blocks; never surfaced to users.
    """
class _PushedRequestSent(_HeadersSent):
    """
    Internal-only event fired whenever pushed request headers are sent.
    Used to choose the validation steps applied to outgoing header
    blocks; never surfaced to users.
    """
class InformationalResponseReceived(Event):
    """
    Fired when an informational response (status code 1XX) is received
    from the remote peer, which may send any number of them, zero
    included. These are most commonly sent in response to requests with
    the ``expect: 100-continue`` header field present; unless you rely on
    that flow, or for some reason expect a different 1XX status code, you
    can safely ignore this event.

    .. versionadded:: 2.2.0

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``priority_updated`` property.
    """
    def __init__(self):
        #: ID of the stream this informational response was made on.
        self.stream_id = None

        #: The informational response's header block.
        self.headers = None

        #: Holds the associated :class:`PriorityUpdated
        #: <h2.events.PriorityUpdated>` event when this response carried
        #: priority information, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.priority_updated = None

    def __repr__(self):
        template = "<InformationalResponseReceived stream_id:%s, headers:%s>"
        return template % (self.stream_id, self.headers)
class DataReceived(Event):
    """
    Fired whenever body data arrives on a stream from the remote peer.
    Carries the data itself, the ID of the stream it arrived on, and the
    flow-controlled size of the frame.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` property.
    """
    def __init__(self):
        #: ID of the stream this data was received on.
        self.stream_id = None

        #: The received data.
        self.data = None

        #: How much of the received frame counts against the flow control
        #: window. Padding counts too, so always use this value — never
        #: ``len(data)`` — when adjusting flow control.
        self.flow_controlled_length = None

        #: Holds the associated :class:`StreamEnded
        #: <h2.events.StreamEnded>` event when this chunk also completed
        #: the stream, else ``None``.
        #:
        #: .. versionadded:: 2.4.0
        self.stream_ended = None

    def __repr__(self):
        template = (
            "<DataReceived stream_id:%s, "
            "flow_controlled_length:%s, "
            "data:%s>"
        )
        return template % (
            self.stream_id,
            self.flow_controlled_length,
            _bytes_representation(self.data[:20]),
        )
class WindowUpdated(Event):
    """
    Fired whenever a flow control window changes size. HTTP/2 keeps flow
    control windows for both connections and streams, and this event is
    fired for both kinds. Carries the ID of the stream the update applies
    to (``0`` for the connection window) and the window delta.
    """
    def __init__(self):
        #: ID of the stream whose flow control window changed. May be
        #: ``0`` when the connection window changed.
        self.stream_id = None

        #: The change in window size.
        self.delta = None

    def __repr__(self):
        template = "<WindowUpdated stream_id:%s, delta:%s>"
        return template % (self.stream_id, self.delta)
class RemoteSettingsChanged(Event):
    """
    Fired whenever the remote peer changes its settings. Carries a
    complete inventory of the changed settings, including their previous
    values.

    HTTP/2 requires settings changes to be acknowledged, and hyper-h2
    does so automatically for efficiency. The caller may nonetheless be
    unhappy with a change: on receiving this event, confirm the new
    settings are acceptable, and if they are not, close the connection
    with the error code :data:`PROTOCOL_ERROR
    <h2.errors.ErrorCodes.PROTOCOL_ERROR>`.

    .. versionchanged:: 2.0.0
       Prior to this version the user needed to acknowledge settings
       changes. This is no longer the case: hyper-h2 now automatically
       acknowledges them.
    """
    def __init__(self):
        #: Maps setting byte to :class:`ChangedSetting
        #: <h2.settings.ChangedSetting>` for every changed setting.
        self.changed_settings = {}

    @classmethod
    def from_settings(cls, old_settings, new_settings):
        """
        Build a RemoteSettingsChanged event from a set of changed settings.

        :param old_settings: A complete collection of old settings, in the
            form of a dictionary of ``{setting: value}``.
        :param new_settings: All the changed settings and their new
            values, in the form of a dictionary of ``{setting: value}``.
        """
        event = cls()
        for code, new_value in new_settings.items():
            code = _setting_code_from_int(code)
            change = ChangedSetting(
                code, old_settings.get(code), new_value
            )
            event.changed_settings[code] = change
        return event

    def __repr__(self):
        inner = ", ".join(
            repr(cs) for cs in self.changed_settings.values()
        )
        return "<RemoteSettingsChanged changed_settings:{%s}>" % (inner,)
class PingAcknowledged(Event):
    """
    Fired whenever a user-emitted PING is acknowledged. Carries the
    opaque data of the ACK'ed PING so users can correlate PINGs and
    calculate round-trip time.
    """
    def __init__(self):
        #: The opaque data included on the ping.
        self.ping_data = None

    def __repr__(self):
        data = _bytes_representation(self.ping_data)
        return "<PingAcknowledged ping_data:%s>" % (data,)
class StreamEnded(Event):
    """
    Fired whenever a stream is ended by the remote party. The stream may
    remain half-open if it has not also been closed locally, but no
    further data or headers should be expected on it.
    """
    def __init__(self):
        #: ID of the stream that was ended.
        self.stream_id = None

    def __repr__(self):
        return "<StreamEnded stream_id:%s>" % self.stream_id
class StreamReset(Event):
    """
    Fired in two situations: when the remote party forcefully resets a
    stream, and when the remote party commits a protocol error affecting
    only a single stream, in which case Hyper-h2 terminates the stream
    early and returns this event.

    .. versionchanged:: 2.0.0
       This event is now fired when Hyper-h2 automatically resets a
       stream.
    """
    def __init__(self):
        #: ID of the stream that was reset.
        self.stream_id = None

        #: The error code given, as one of :class:`ErrorCodes
        #: <h2.errors.ErrorCodes>` or a plain ``int``.
        self.error_code = None

        #: ``True`` when the remote peer sent the RST_STREAM, ``False``
        #: when we sent it.
        self.remote_reset = True

    def __repr__(self):
        template = "<StreamReset stream_id:%s, error_code:%s, remote_reset:%s>"
        return template % (
            self.stream_id, self.error_code, self.remote_reset
        )
class PushedStreamReceived(Event):
    """
    Fired whenever a pushed stream is received from a remote peer.
    Carries the new stream's ID, the ID of its parent stream, and the
    request headers pushed by the remote peer.
    """
    def __init__(self):
        #: ID of the stream created by the push.
        self.pushed_stream_id = None

        #: ID of the stream the push relates to.
        self.parent_stream_id = None

        #: The request headers, sent by the remote party in the push.
        self.headers = None

    def __repr__(self):
        template = (
            "<PushedStreamReceived pushed_stream_id:%s, parent_stream_id:%s, "
            "headers:%s>"
        )
        return template % (
            self.pushed_stream_id,
            self.parent_stream_id,
            self.headers,
        )
class SettingsAcknowledged(Event):
    """
    Fired whenever a settings ACK is received from the remote peer.
    Carries the acknowledged settings in the same format as
    :class:`h2.events.RemoteSettingsChanged`.
    """
    def __init__(self):
        #: Maps setting byte to :class:`ChangedSetting
        #: <h2.settings.ChangedSetting>` for every acknowledged setting.
        self.changed_settings = {}

    def __repr__(self):
        inner = ", ".join(
            repr(cs) for cs in self.changed_settings.values()
        )
        return "<SettingsAcknowledged changed_settings:{%s}>" % (inner,)
class PriorityUpdated(Event):
    """
    Fired whenever a stream sends updated priority information, which can
    happen when the stream is opened or at any time during its lifetime.
    Purely advisory; no action is required.

    .. versionadded:: 2.0.0
    """
    def __init__(self):
        #: ID of the stream whose priority information is being updated.
        self.stream_id = None

        #: The new stream weight, an integer between 1 and 256. May be
        #: the same as the original weight.
        self.weight = None

        #: ID of the stream this stream now depends on. May be ``0``.
        self.depends_on = None

        #: Whether this stream *exclusively* depends on the parent
        #: stream; if so, this stream should inherit the current children
        #: of its new parent.
        self.exclusive = None

    def __repr__(self):
        template = (
            "<PriorityUpdated stream_id:%s, weight:%s, depends_on:%s, "
            "exclusive:%s>"
        )
        return template % (
            self.stream_id,
            self.weight,
            self.depends_on,
            self.exclusive
        )
class ConnectionTerminated(Event):
    """
    Fired when the remote peer tears the connection down using a GOAWAY
    frame. Once received, no further action may be taken on the
    connection: a new connection must be established.
    """
    def __init__(self):
        #: The error code cited when tearing down the connection. Should
        #: be one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may
        #: not be if unknown HTTP/2 extensions are being used.
        self.error_code = None

        #: The last stream ID the remote peer saw; indicates what data,
        #: if any, never reached the remote peer and can safely be
        #: resent.
        self.last_stream_id = None

        #: Additional debug data that can be appended to a GOAWAY frame.
        self.additional_data = None

    def __repr__(self):
        debug = self.additional_data[:20] if self.additional_data else None
        template = (
            "<ConnectionTerminated error_code:%s, last_stream_id:%s, "
            "additional_data:%s>"
        )
        return template % (
            self.error_code,
            self.last_stream_id,
            _bytes_representation(debug),
        )
class AlternativeServiceAvailable(Event):
    """
    Fired when the remote peer advertises an `RFC 7838
    <https://tools.ietf.org/html/rfc7838>`_ Alternative Service using an
    ALTSVC frame.

    Always carries the origin the ALTSVC information applies to: either
    supplied by the server directly, or inferred by hyper-h2 from the
    ``:authority`` pseudo-header field the user sent when initiating the
    stream. Also carries what RFC 7838 calls the "Alternative Service
    Field Value", formatted like a HTTP header field; hyper-h2 does not
    parse or in any way modify that information — that is up to the user.

    This event can only be fired on the client end of a connection.

    .. versionadded:: 2.3.0
    """
    def __init__(self):
        #: The origin the alternative service field value applies to,
        #: either supplied by the server directly or inferred by hyper-h2
        #: from the ``:authority`` pseudo-header field sent by the user
        #: when initiating the stream on which the frame was received.
        self.origin = None

        #: The raw ALTSVC field value describing the advertised HTTP
        #: alternative service, left exactly as sent by the server. Its
        #: structure is given by `RFC 7838 Section 3
        #: <https://tools.ietf.org/html/rfc7838#section-3>`_.
        self.field_value = None

    def __repr__(self):
        origin = self.origin.decode('utf-8', 'ignore')
        field_value = self.field_value.decode('utf-8', 'ignore')
        return (
            "<AlternativeServiceAvailable origin:%s, field_value:%s>" % (
                origin,
                field_value,
            )
        )
class UnknownFrameReceived(Event):
    """
    Fired when the remote peer sends a frame hyper-h2 does not
    understand, primarily when the peer employs HTTP/2 extensions
    hyper-h2 knows nothing about. As RFC 7540 requires, hyper-h2 ignores
    such frames; this event simply lets implementations perform their own
    special processing if they can handle the frame themselves.

    .. versionadded:: 2.7.0
    """
    def __init__(self):
        #: The hyperframe Frame object that encapsulates the received
        #: frame.
        self.frame = None

    def __repr__(self):
        return "<UnknownFrameReceived>"
def _bytes_representation(data):
"""
Converts a bytestring into something that is safe to print on all Python
platforms.
This function is relatively expensive, so it should not be called on the
mainline of the code. It's safe to use in things like object repr methods
though.
"""
if data is None:
return None
hex = binascii.hexlify(data)
# This is moderately clever: on all Python versions hexlify returns a byte
# string. On Python 3 we want an actual string, so we just check whether
# that's what we have.
if not isinstance(hex, str): # pragma: no cover
hex = hex.decode('ascii')
return hex

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше