initial changes for removing dependency on azure-storage-common
This commit is contained in:
Parent: aa6a9b580e
Commit: 18d1682cf2

@@ -1,6 +1,8 @@
# Change Log

> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.

## Version 1.0.6

- Removed dependency on azure-storage-common

## Version 1.0.5

- Packaging change to ensure futures is not installed in Python 3 environments

@@ -2,7 +2,16 @@
> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.

## Version 1.0.6

- Removed dependency on azure-storage-common

## Version 1.0.5

- Packaging change to ensure futures is not installed in Python 3 environments

## Version 1.0.4

- Updated azure-storage-common dependency to >= 1.1.0, < 2.0.0

## Version 1.0.3

- Require futures package only for python versions <= 2.7

## Version 1.0.2

@@ -0,0 +1,39 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.cosmosdb.common._constants import (
    __author__,
    __version__,
    DEFAULT_X_MS_VERSION,
)
from azure.cosmosdb.common.cloudstorageaccount import CloudStorageAccount
from azure.cosmosdb.common.models import (
    RetentionPolicy,
    Logging,
    Metrics,
    CorsRule,
    DeleteRetentionPolicy,
    StaticWebsite,
    ServiceProperties,
    AccessPolicy,
    ResourceTypes,
    Services,
    AccountPermissions,
    Protocol,
    ServiceStats,
    GeoReplication,
    LocationMode,
    RetryContext,
)
from azure.cosmosdb.common.retry import (
    ExponentialRetry,
    LinearRetry,
    no_retry,
)
from azure.cosmosdb.common.sharedaccesssignature import (
    SharedAccessSignature,
)
from azure.cosmosdb.common.tokencredential import TokenCredential
from azure.cosmosdb.common._error import AzureSigningError

@@ -0,0 +1,129 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import sys

from ._common_conversion import (
    _sign_string,
)
from ._constants import (
    DEV_ACCOUNT_NAME,
    DEV_ACCOUNT_SECONDARY_NAME,
)
from ._error import (
    AzureSigningError,
    _wrap_exception,
)

if sys.version_info >= (3,):
    from urllib.parse import parse_qsl
else:
    from urlparse import parse_qsl

logger = logging.getLogger(__name__)


# NOTE: in the source listing this base class and the concrete class below
# shared the same name; the distinct base-class name used here is an assumed
# fix for that (likely unintentional) shadowing.
class _StorageSharedKeyAuthenticationBase(object):
    def __init__(self, account_name, account_key, is_emulated=False):
        self.account_name = account_name
        self.account_key = account_key
        self.is_emulated = is_emulated

    def _get_headers(self, request, headers_to_sign):
        headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
        if 'content-length' in headers and headers['content-length'] == '0':
            del headers['content-length']
        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'

    def _get_verb(self, request):
        return request.method + '\n'

    def _get_canonicalized_resource(self, request):
        uri_path = request.path.split('?')[0]

        # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME
        # as this is how the emulator works
        if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1:
            # only replace the first instance
            uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)

        return '/' + self.account_name + uri_path

    def _get_canonicalized_headers(self, request):
        string_to_sign = ''
        x_ms_headers = []
        for name, value in request.headers.items():
            if name.startswith('x-ms-'):
                x_ms_headers.append((name.lower(), value))
        x_ms_headers.sort()
        for name, value in x_ms_headers:
            if value is not None:
                string_to_sign += ''.join([name, ':', value, '\n'])
        return string_to_sign

    def _add_authorization_header(self, request, string_to_sign):
        try:
            signature = _sign_string(self.account_key, string_to_sign)
            auth_string = 'SharedKey ' + self.account_name + ':' + signature
            request.headers['Authorization'] = auth_string
        except Exception as ex:
            # Wrap any error that occurred as a signing error;
            # doing so clarifies/locates the source of the problem.
            raise _wrap_exception(ex, AzureSigningError)


class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthenticationBase):
    def sign_request(self, request):
        string_to_sign = \
            self._get_verb(request) + \
            self._get_headers(
                request,
                [
                    'content-encoding', 'content-language', 'content-length',
                    'content-md5', 'content-type', 'date', 'if-modified-since',
                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
                ]
            ) + \
            self._get_canonicalized_headers(request) + \
            self._get_canonicalized_resource(request) + \
            self._get_canonicalized_resource_query(request)

        self._add_authorization_header(request, string_to_sign)
        logger.debug("String_to_sign=%s", string_to_sign)

    def _get_canonicalized_resource_query(self, request):
        sorted_queries = [(name, value) for name, value in request.query.items()]
        sorted_queries.sort()

        string_to_sign = ''
        for name, value in sorted_queries:
            if value is not None:
                string_to_sign += '\n' + name.lower() + ':' + value

        return string_to_sign


class _StorageNoAuthentication(object):
    def sign_request(self, request):
        pass


class _StorageSASAuthentication(object):
    def __init__(self, sas_token):
        # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens
        # doing so avoids double question marks when signing
        if sas_token[0] == '?':
            sas_token = sas_token[1:]

        self.sas_qs = parse_qsl(sas_token)

    def sign_request(self, request):
        # if 'sig' is present, then the request has already been signed
        # as is the case when performing retries
        if 'sig' in request.query:
            return

        request.query.update(self.sas_qs)
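
To make the signing flow concrete, here is a minimal sketch (not part of the commit) that signs a made-up request; the _FakeRequest stand-in and the throwaway key are assumptions, supplying only the method/path/headers/query attributes that sign_request reads.

import base64

class _FakeRequest(object):
    def __init__(self):
        self.method = 'GET'
        self.path = '/mycontainer/myblob'
        self.headers = {'x-ms-date': 'Mon, 01 Jan 2018 00:00:00 GMT',
                        'x-ms-version': '2018-03-28'}
        self.query = {}

# _sign_string base64-decodes the key by default, so use a valid base64 value.
fake_key = base64.b64encode(b'0' * 32).decode('utf-8')

auth = _StorageSharedKeyAuthentication('myaccount', fake_key)
request = _FakeRequest()
auth.sign_request(request)
print(request.headers['Authorization'])  # SharedKey myaccount:<base64 signature>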

@@ -0,0 +1,126 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import base64
import hashlib
import hmac
import sys
from io import SEEK_SET

from dateutil.tz import tzutc

from ._error import (
    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
)
from .models import (
    _unicode_type,
)

if sys.version_info < (3,):
    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')

        return str(value)
else:
    _str = str


def _to_str(value):
    return _str(value) if value is not None else None


def _int_to_str(value):
    return str(int(value)) if value is not None else None


def _bool_to_str(value):
    if value is None:
        return None

    if isinstance(value, bool):
        if value:
            return 'true'
        else:
            return 'false'

    return str(value)


def _to_utc_datetime(value):
    return value.strftime('%Y-%m-%dT%H:%M:%SZ')


def _datetime_to_utc_string(value):
    # Azure expects the date value passed in to be UTC.
    # Azure will always return values as UTC.
    # If a date is passed in without timezone info, it is assumed to be UTC.
    if value is None:
        return None

    if value.tzinfo:
        value = value.astimezone(tzutc())

    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')


def _encode_base64(data):
    if isinstance(data, _unicode_type):
        data = data.encode('utf-8')
    encoded = base64.b64encode(data)
    return encoded.decode('utf-8')


def _decode_base64_to_bytes(data):
    if isinstance(data, _unicode_type):
        data = data.encode('utf-8')
    return base64.b64decode(data)


def _decode_base64_to_text(data):
    decoded_bytes = _decode_base64_to_bytes(data)
    return decoded_bytes.decode('utf-8')


def _sign_string(key, string_to_sign, key_is_base64=True):
    if key_is_base64:
        key = _decode_base64_to_bytes(key)
    else:
        if isinstance(key, _unicode_type):
            key = key.encode('utf-8')
    if isinstance(string_to_sign, _unicode_type):
        string_to_sign = string_to_sign.encode('utf-8')
    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
    digest = signed_hmac_sha256.digest()
    encoded_digest = _encode_base64(digest)
    return encoded_digest


def _get_content_md5(data):
    md5 = hashlib.md5()
    if isinstance(data, bytes):
        md5.update(data)
    elif hasattr(data, 'read'):
        pos = 0
        try:
            pos = data.tell()
        except:  # best effort: some streams do not support tell()
            pass
        for chunk in iter(lambda: data.read(4096), b""):
            md5.update(chunk)
        try:
            data.seek(pos, SEEK_SET)
        except (AttributeError, IOError):
            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
    else:
        raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))

    return base64.b64encode(md5.digest()).decode('utf-8')


def _lower(text):
    return text.lower()
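
A quick round-trip sketch of the helpers above, using a throwaway key (illustrative only):

import base64
from io import BytesIO

key = base64.b64encode(b'\x00' * 32).decode('utf-8')  # placeholder key

# _sign_string: base64-decode the key, HMAC-SHA256 the payload, base64-encode the digest.
assert _sign_string(key, 'payload') == _sign_string(key, 'payload')  # deterministic

# _get_content_md5 accepts bytes or a seekable stream and restores the stream position.
stream = BytesIO(b'hello')
assert _get_content_md5(b'hello') == _get_content_md5(stream)
assert stream.tell() == 0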

@@ -0,0 +1,161 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys

if sys.version_info >= (3,):
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

from ._constants import (
    SERVICE_HOST_BASE,
    DEFAULT_PROTOCOL,
    DEV_ACCOUNT_NAME,
    DEV_ACCOUNT_SECONDARY_NAME,
    DEV_ACCOUNT_KEY,
    DEV_BLOB_HOST,
    DEV_QUEUE_HOST,
)
from ._error import (
    _ERROR_STORAGE_MISSING_INFO,
)

_EMULATOR_ENDPOINTS = {
    'blob': DEV_BLOB_HOST,
    'queue': DEV_QUEUE_HOST,
    'file': '',
}

_CONNECTION_ENDPOINTS = {
    'blob': 'BlobEndpoint',
    'queue': 'QueueEndpoint',
    'file': 'FileEndpoint',
}

_CONNECTION_ENDPOINTS_SECONDARY = {
    'blob': 'BlobSecondaryEndpoint',
    'queue': 'QueueSecondaryEndpoint',
    'file': 'FileSecondaryEndpoint',
}


class _ServiceParameters(object):
    def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
                 custom_domain=None, custom_domain_secondary=None):

        self.account_name = account_name
        self.account_key = account_key
        self.sas_token = sas_token
        self.token_credential = token_credential
        self.protocol = protocol or DEFAULT_PROTOCOL
        self.is_emulated = is_emulated

        if is_emulated:
            self.account_name = DEV_ACCOUNT_NAME
            self.protocol = 'http'

            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
            emulator_endpoint = _EMULATOR_ENDPOINTS[service] if custom_domain is None else custom_domain

            self.primary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_NAME)
            self.secondary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_SECONDARY_NAME)
        else:
            # Strip whitespace from the key
            if self.account_key:
                self.account_key = self.account_key.strip()

            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE

            # Set up the primary endpoint
            if custom_domain:
                parsed_url = urlparse(custom_domain)

                # Trim any trailing slashes from the path
                path = parsed_url.path.rstrip('/')

                self.primary_endpoint = parsed_url.netloc + path
                # use a falsy check (not 'is') for the empty-scheme test; identity
                # comparison of string literals is implementation-dependent
                self.protocol = parsed_url.scheme if parsed_url.scheme else self.protocol
            else:
                if not self.account_name:
                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)

            # Set up the secondary endpoint
            if custom_domain_secondary:
                if not custom_domain:
                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)

                parsed_url = urlparse(custom_domain_secondary)

                # Trim any trailing slashes from the path
                path = parsed_url.path.rstrip('/')

                self.secondary_endpoint = parsed_url.netloc + path
            else:
                if self.account_name:
                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
                else:
                    self.secondary_endpoint = None

    @staticmethod
    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential=None,
                               is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
                               request_session=None, connection_string=None, socket_timeout=None):
        if connection_string:
            params = _ServiceParameters._from_connection_string(connection_string, service)
        elif is_emulated:
            params = _ServiceParameters(service, is_emulated=True, custom_domain=custom_domain)
        elif account_name:
            # guard against protocol being None before calling lower()
            if token_credential is not None and (protocol or DEFAULT_PROTOCOL).lower() != 'https':
                raise ValueError("Token credential is only supported with HTTPS.")
            params = _ServiceParameters(service,
                                        account_name=account_name,
                                        account_key=account_key,
                                        sas_token=sas_token,
                                        token_credential=token_credential,
                                        is_emulated=is_emulated,
                                        protocol=protocol,
                                        endpoint_suffix=endpoint_suffix,
                                        custom_domain=custom_domain)
        else:
            raise ValueError(_ERROR_STORAGE_MISSING_INFO)

        params.request_session = request_session
        params.socket_timeout = socket_timeout
        return params

    @staticmethod
    def _from_connection_string(connection_string, service):
        # Split into key=value pairs removing empties, then split the pairs into a dict
        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)

        # Authentication
        account_name = config.get('AccountName')
        account_key = config.get('AccountKey')
        sas_token = config.get('SharedAccessSignature')

        # Emulator
        is_emulated = config.get('UseDevelopmentStorage')

        # Basic URL Configuration
        protocol = config.get('DefaultEndpointsProtocol')
        endpoint_suffix = config.get('EndpointSuffix')

        # Custom URLs
        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])

        return _ServiceParameters(service,
                                  account_name=account_name,
                                  account_key=account_key,
                                  sas_token=sas_token,
                                  is_emulated=is_emulated,
                                  protocol=protocol,
                                  endpoint_suffix=endpoint_suffix,
                                  custom_domain=endpoint,
                                  custom_domain_secondary=endpoint_secondary)
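
As an illustration of the connection-string path, the account name and key below are placeholders:

conn_str = ('DefaultEndpointsProtocol=https;'
            'AccountName=myaccount;'
            'AccountKey=cGxhY2Vob2xkZXI=;'
            'EndpointSuffix=core.windows.net')

params = _ServiceParameters.get_service_parameters('blob', connection_string=conn_str)
print(params.primary_endpoint)    # myaccount.blob.core.windows.net
print(params.secondary_endpoint)  # myaccount-secondary.blob.core.windows.net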

@@ -0,0 +1,51 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import platform
import sys

__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '1.4.2'

# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
# The first version (0.37.0) is the common package, and the second version (0.38.0) is the service package
USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
                                                          platform.python_version(), platform.system(),
                                                          platform.release())

# default values for the common package, in case it is used directly
DEFAULT_X_MS_VERSION = '2018-03-28'
DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)

# Live ServiceClient URLs
SERVICE_HOST_BASE = 'core.windows.net'
DEFAULT_PROTOCOL = 'https'

# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'

# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='

# Socket timeout in seconds
DEFAULT_SOCKET_TIMEOUT = 20

# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
# The socket timeout is now the maximum total duration to send all data.
if sys.version_info >= (3, 5):
    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
    # the 2000 seconds was calculated with: 100MB (max block size) / 50KB/s (an arbitrarily chosen minimum upload speed)
    DEFAULT_SOCKET_TIMEOUT = (20, 2000)

# Encryption constants
_ENCRYPTION_PROTOCOL_V1 = '1.0'

_AUTHORIZATION_HEADER_NAME = 'Authorization'
_COPY_SOURCE_HEADER_NAME = 'x-ms-copy-source'
_REDACTED_VALUE = 'REDACTED'
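
A small sanity check of the timeout shape switch (illustrative; the tuple form matches the (connect, read) timeout that the requests library accepts):

import sys

if sys.version_info >= (3, 5):
    connect_timeout, read_timeout = DEFAULT_SOCKET_TIMEOUT
    assert (connect_timeout, read_timeout) == (20, 2000)
else:
    assert DEFAULT_SOCKET_TIMEOUT == 20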

@@ -0,0 +1,384 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from dateutil import parser

from ._common_conversion import _to_str

try:
    from xml.etree import cElementTree as ETree
except ImportError:
    from xml.etree import ElementTree as ETree

from .models import (
    ServiceProperties,
    Logging,
    Metrics,
    CorsRule,
    AccessPolicy,
    _dict,
    GeoReplication,
    ServiceStats,
    DeleteRetentionPolicy,
    StaticWebsite,
)


def _to_int(value):
    return value if value is None else int(value)


def _bool(value):
    return value.lower() == 'true'


def _to_upper_str(value):
    return _to_str(value).upper() if value is not None else None


def _get_download_size(start_range, end_range, resource_size):
    if start_range is not None:
        end_range = end_range if end_range else (resource_size if resource_size else None)
        if end_range is not None:
            return end_range - start_range
        else:
            return None
    else:
        return resource_size


GET_PROPERTIES_ATTRIBUTE_MAP = {
    'last-modified': (None, 'last_modified', parser.parse),
    'etag': (None, 'etag', _to_str),
    'x-ms-blob-type': (None, 'blob_type', _to_str),
    'content-length': (None, 'content_length', _to_int),
    'content-range': (None, 'content_range', _to_str),
    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int),
    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int),
    'x-ms-blob-public-access': (None, 'public_access', _to_str),
    'x-ms-access-tier': (None, 'blob_tier', _to_str),
    'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
    'x-ms-share-quota': (None, 'quota', _to_int),
    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
    'x-ms-creation-time': (None, 'creation_time', parser.parse),
    'content-type': ('content_settings', 'content_type', _to_str),
    'cache-control': ('content_settings', 'cache_control', _to_str),
    'content-encoding': ('content_settings', 'content_encoding', _to_str),
    'content-disposition': ('content_settings', 'content_disposition', _to_str),
    'content-language': ('content_settings', 'content_language', _to_str),
    'content-md5': ('content_settings', 'content_md5', _to_str),
    'x-ms-lease-status': ('lease', 'status', _to_str),
    'x-ms-lease-state': ('lease', 'state', _to_str),
    'x-ms-lease-duration': ('lease', 'duration', _to_str),
    'x-ms-copy-id': ('copy', 'id', _to_str),
    'x-ms-copy-source': ('copy', 'source', _to_str),
    'x-ms-copy-status': ('copy', 'status', _to_str),
    'x-ms-copy-progress': ('copy', 'progress', _to_str),
    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
    'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool),
    'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool),
}


def _parse_metadata(response):
    '''
    Extracts the resource metadata information.
    '''

    if response is None or response.headers is None:
        return None

    metadata = _dict()
    for key, value in response.headers.items():
        if key.lower().startswith('x-ms-meta-'):
            metadata[key[10:]] = _to_str(value)

    return metadata


def _parse_properties(response, result_class):
    '''
    Extracts the resource properties and metadata information.
    Ignores the standard http headers.
    '''

    if response is None or response.headers is None:
        return None

    props = result_class()
    for key, value in response.headers.items():
        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
        if info:
            if info[0] is None:
                setattr(props, info[1], info[2](value))
            else:
                attr = getattr(props, info[0])
                setattr(attr, info[1], info[2](value))

    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' \
            and hasattr(props, 'blob_tier') and props.blob_tier is not None:
        props.blob_tier = _to_upper_str(props.blob_tier)
    return props


def _parse_length_from_content_range(content_range):
    '''
    Parses the blob length from the content range header: bytes 1-3/65537
    '''
    if content_range is None:
        return None

    # First, split on space and take the second half: '1-3/65537'
    # Next, split on slash and take the second half: '65537'
    # Finally, convert to an int: 65537
    return int(content_range.split(' ', 1)[1].split('/', 1)[1])


def _convert_xml_to_signed_identifiers(response):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <SignedIdentifiers>
      <SignedIdentifier>
        <Id>unique-value</Id>
        <AccessPolicy>
          <Start>start-time</Start>
          <Expiry>expiry-time</Expiry>
          <Permission>abbreviated-permission-list</Permission>
        </AccessPolicy>
      </SignedIdentifier>
    </SignedIdentifiers>
    '''
    if response is None or response.body is None:
        return None

    list_element = ETree.fromstring(response.body)
    signed_identifiers = _dict()

    for signed_identifier_element in list_element.findall('SignedIdentifier'):
        # Id element
        id = signed_identifier_element.find('Id').text

        # Access policy element
        access_policy = AccessPolicy()
        access_policy_element = signed_identifier_element.find('AccessPolicy')
        if access_policy_element is not None:
            start_element = access_policy_element.find('Start')
            if start_element is not None:
                access_policy.start = parser.parse(start_element.text)

            expiry_element = access_policy_element.find('Expiry')
            if expiry_element is not None:
                access_policy.expiry = parser.parse(expiry_element.text)

            access_policy.permission = access_policy_element.findtext('Permission')

        signed_identifiers[id] = access_policy

    return signed_identifiers


def _convert_xml_to_service_stats(response):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceStats>
      <GeoReplication>
        <Status>live|bootstrap|unavailable</Status>
        <LastSyncTime>sync-time|<empty></LastSyncTime>
      </GeoReplication>
    </StorageServiceStats>
    '''
    if response is None or response.body is None:
        return None

    service_stats_element = ETree.fromstring(response.body)

    geo_replication_element = service_stats_element.find('GeoReplication')

    geo_replication = GeoReplication()
    geo_replication.status = geo_replication_element.find('Status').text
    last_sync_time = geo_replication_element.find('LastSyncTime').text
    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None

    service_stats = ServiceStats()
    service_stats.geo_replication = geo_replication
    return service_stats


def _convert_xml_to_service_properties(response):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
      <Logging>
        <Version>version-number</Version>
        <Delete>true|false</Delete>
        <Read>true|false</Read>
        <Write>true|false</Write>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </Logging>
      <HourMetrics>
        <Version>version-number</Version>
        <Enabled>true|false</Enabled>
        <IncludeAPIs>true|false</IncludeAPIs>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </HourMetrics>
      <MinuteMetrics>
        <Version>version-number</Version>
        <Enabled>true|false</Enabled>
        <IncludeAPIs>true|false</IncludeAPIs>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </MinuteMetrics>
      <Cors>
        <CorsRule>
          <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
          <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
          <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
          <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
          <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
        </CorsRule>
      </Cors>
      <DeleteRetentionPolicy>
        <Enabled>true|false</Enabled>
        <Days>number-of-days</Days>
      </DeleteRetentionPolicy>
      <StaticWebsite>
        <Enabled>true|false</Enabled>
        <IndexDocument></IndexDocument>
        <ErrorDocument404Path></ErrorDocument404Path>
      </StaticWebsite>
    </StorageServiceProperties>
    '''
    if response is None or response.body is None:
        return None

    service_properties_element = ETree.fromstring(response.body)
    service_properties = ServiceProperties()

    # Logging
    logging = service_properties_element.find('Logging')
    if logging is not None:
        service_properties.logging = Logging()
        service_properties.logging.version = logging.find('Version').text
        service_properties.logging.delete = _bool(logging.find('Delete').text)
        service_properties.logging.read = _bool(logging.find('Read').text)
        service_properties.logging.write = _bool(logging.find('Write').text)

        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
                                         service_properties.logging.retention_policy)
    # HourMetrics
    hour_metrics_element = service_properties_element.find('HourMetrics')
    if hour_metrics_element is not None:
        service_properties.hour_metrics = Metrics()
        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)

    # MinuteMetrics
    minute_metrics_element = service_properties_element.find('MinuteMetrics')
    if minute_metrics_element is not None:
        service_properties.minute_metrics = Metrics()
        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)

    # CORS
    cors = service_properties_element.find('Cors')
    if cors is not None:
        service_properties.cors = list()
        for rule in cors.findall('CorsRule'):
            allowed_origins = rule.find('AllowedOrigins').text.split(',')

            allowed_methods = rule.find('AllowedMethods').text.split(',')

            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)

            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)

            exposed_headers = rule.find('ExposedHeaders').text
            if exposed_headers is not None:
                cors_rule.exposed_headers = exposed_headers.split(',')

            allowed_headers = rule.find('AllowedHeaders').text
            if allowed_headers is not None:
                cors_rule.allowed_headers = allowed_headers.split(',')

            service_properties.cors.append(cors_rule)

    # Target version
    target_version = service_properties_element.find('DefaultServiceVersion')
    if target_version is not None:
        service_properties.target_version = target_version.text

    # DeleteRetentionPolicy
    delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
    if delete_retention_policy_element is not None:
        service_properties.delete_retention_policy = DeleteRetentionPolicy()
        policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
        service_properties.delete_retention_policy.enabled = policy_enabled

        if policy_enabled:
            service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)

    # StaticWebsite
    static_website_element = service_properties_element.find('StaticWebsite')
    if static_website_element is not None:
        service_properties.static_website = StaticWebsite()
        service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text)

        index_document_element = static_website_element.find('IndexDocument')
        if index_document_element is not None:
            service_properties.static_website.index_document = index_document_element.text

        error_document_element = static_website_element.find('ErrorDocument404Path')
        if error_document_element is not None:
            service_properties.static_website.error_document_404_path = error_document_element.text

    return service_properties


def _convert_xml_to_metrics(xml, metrics):
    '''
    <Version>version-number</Version>
    <Enabled>true|false</Enabled>
    <IncludeAPIs>true|false</IncludeAPIs>
    <RetentionPolicy>
      <Enabled>true|false</Enabled>
      <Days>number-of-days</Days>
    </RetentionPolicy>
    '''
    # Version
    metrics.version = xml.find('Version').text

    # Enabled
    metrics.enabled = _bool(xml.find('Enabled').text)

    # IncludeAPIs
    include_apis_element = xml.find('IncludeAPIs')
    if include_apis_element is not None:
        metrics.include_apis = _bool(include_apis_element.text)

    # RetentionPolicy
    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)


def _convert_xml_to_retention_policy(xml, retention_policy):
    '''
    <Enabled>true|false</Enabled>
    <Days>number-of-days</Days>
    '''
    # Enabled
    retention_policy.enabled = _bool(xml.find('Enabled').text)

    # Days
    days_element = xml.find('Days')
    if days_element is not None:
        retention_policy.days = int(days_element.text)
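
For illustration, the stats parser can be exercised with a stub; _FakeResponse is a stand-in exposing only the body attribute the parser reads:

class _FakeResponse(object):
    def __init__(self, body):
        self.body = body

xml = (b'<?xml version="1.0" encoding="utf-8"?>'
       b'<StorageServiceStats><GeoReplication>'
       b'<Status>live</Status>'
       b'<LastSyncTime>Tue, 19 Jan 2021 22:28:43 GMT</LastSyncTime>'
       b'</GeoReplication></StorageServiceStats>')

stats = _convert_xml_to_service_stats(_FakeResponse(xml))
print(stats.geo_replication.status)          # live
print(stats.geo_replication.last_sync_time)  # 2021-01-19 22:28:43+00:00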

@@ -0,0 +1,233 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from collections import OrderedDict

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC

from ._common_conversion import (
    _encode_base64,
    _decode_base64_to_bytes,
)
from ._constants import (
    _ENCRYPTION_PROTOCOL_V1,
    __version__,
)
from ._error import (
    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
    _validate_not_none,
    _validate_encryption_protocol_version,
    _validate_key_encryption_key_unwrap,
    _validate_kek_id,
)


class _EncryptionAlgorithm(object):
    '''
    Specifies which client encryption algorithm is used.
    '''
    AES_CBC_256 = 'AES_CBC_256'


class _WrappedContentKey:
    '''
    Represents the envelope key details stored on the service.
    '''

    def __init__(self, algorithm, encrypted_key, key_id):
        '''
        :param str algorithm:
            The algorithm used for wrapping.
        :param bytes encrypted_key:
            The encrypted content-encryption-key.
        :param str key_id:
            The key-encryption-key identifier string.
        '''

        _validate_not_none('algorithm', algorithm)
        _validate_not_none('encrypted_key', encrypted_key)
        _validate_not_none('key_id', key_id)

        self.algorithm = algorithm
        self.encrypted_key = encrypted_key
        self.key_id = key_id


class _EncryptionAgent:
    '''
    Represents the encryption agent stored on the service.
    It consists of the encryption protocol version and encryption algorithm used.
    '''

    def __init__(self, encryption_algorithm, protocol):
        '''
        :param _EncryptionAlgorithm encryption_algorithm:
            The algorithm used for encrypting the message contents.
        :param str protocol:
            The protocol version used for encryption.
        '''

        _validate_not_none('encryption_algorithm', encryption_algorithm)
        _validate_not_none('protocol', protocol)

        self.encryption_algorithm = str(encryption_algorithm)
        self.protocol = protocol


class _EncryptionData:
    '''
    Represents the encryption data that is stored on the service.
    '''

    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
                 key_wrapping_metadata):
        '''
        :param bytes content_encryption_IV:
            The content encryption initialization vector.
        :param _EncryptionAgent encryption_agent:
            The encryption agent.
        :param _WrappedContentKey wrapped_content_key:
            An object that stores the wrapping algorithm, the key identifier,
            and the encrypted key bytes.
        :param dict key_wrapping_metadata:
            A dict containing metadata related to the key wrapping.
        '''

        _validate_not_none('content_encryption_IV', content_encryption_IV)
        _validate_not_none('encryption_agent', encryption_agent)
        _validate_not_none('wrapped_content_key', wrapped_content_key)

        self.content_encryption_IV = content_encryption_IV
        self.encryption_agent = encryption_agent
        self.wrapped_content_key = wrapped_content_key
        self.key_wrapping_metadata = key_wrapping_metadata


def _generate_encryption_data_dict(kek, cek, iv):
    '''
    Generates and returns the encryption metadata as a dict.

    :param object kek: The key encryption key. See calling functions for more information.
    :param bytes cek: The content encryption key.
    :param bytes iv: The initialization vector.
    :return: A dict containing all the encryption metadata.
    :rtype: dict
    '''
    # Encrypt the cek.
    wrapped_cek = kek.wrap_key(cek)

    # Build the encryption_data dict.
    # Use OrderedDict to comply with Java's ordering requirement.
    wrapped_content_key = OrderedDict()
    wrapped_content_key['KeyId'] = kek.get_kid()
    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()

    encryption_agent = OrderedDict()
    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256

    encryption_data_dict = OrderedDict()
    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
    encryption_data_dict['EncryptionAgent'] = encryption_agent
    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}

    return encryption_data_dict


def _dict_to_encryption_data(encryption_data_dict):
    '''
    Converts the specified dictionary to an EncryptionData object for
    eventual use in decryption.

    :param dict encryption_data_dict:
        The dictionary containing the encryption data.
    :return: an _EncryptionData object built from the dictionary.
    :rtype: _EncryptionData
    '''
    try:
        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
    except KeyError:
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
    wrapped_content_key = encryption_data_dict['WrappedContentKey']
    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
                                             wrapped_content_key['KeyId'])

    encryption_agent = encryption_data_dict['EncryptionAgent']
    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
                                        encryption_agent['Protocol'])

    if 'KeyWrappingMetadata' in encryption_data_dict:
        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
    else:
        key_wrapping_metadata = None

    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
                                      encryption_agent,
                                      wrapped_content_key,
                                      key_wrapping_metadata)

    return encryption_data


def _generate_AES_CBC_cipher(cek, iv):
    '''
    Generates and returns an encryption cipher for AES CBC using the given cek and iv.

    :param bytes[] cek: The content encryption key for the cipher.
    :param bytes[] iv: The initialization vector for the cipher.
    :return: A cipher for encrypting in AES256 CBC.
    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
    '''

    backend = default_backend()
    algorithm = AES(cek)
    mode = CBC(iv)
    return Cipher(algorithm, mode, backend)


def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
    '''
    Extracts and returns the content_encryption_key stored in the encryption_data object
    and performs necessary validation on all parameters.

    :param _EncryptionData encryption_data:
        The encryption metadata of the retrieved value.
    :param obj key_encryption_key:
        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
        instance variables for more details.
    :param func key_resolver:
        A function that, given a key_id, will return a key_encryption_key. Please refer
        to high-level service object instance variables for more details.
    :return: the content_encryption_key stored in the encryption_data object.
    :rtype: bytes[]
    '''

    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)

    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)

    content_encryption_key = None

    # If the resolver exists, give priority to the key it finds.
    if key_resolver is not None:
        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)

    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_unwrap(key_encryption_key)
    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())

    # Will throw an exception if the specified algorithm is not supported.
    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
                                                           encryption_data.wrapped_content_key.algorithm)
    _validate_not_none('content_encryption_key', content_encryption_key)

    return content_encryption_key
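
A toy end-to-end sketch of the envelope metadata (Python 3; the XOR "wrapping" below is deliberately insecure and only demonstrates the wrap_key/get_kid/get_key_wrap_algorithm interface that the KEK validators in _error.py check for):

import os

class _ToyKEK(object):
    def wrap_key(self, cek):
        return bytes(b ^ 0x5A for b in cek)  # NOT secure; demo only

    def get_kid(self):
        return 'toy-kek-1'

    def get_key_wrap_algorithm(self):
        return 'XOR'

kek = _ToyKEK()
cek = os.urandom(32)  # content encryption key
iv = os.urandom(16)   # AES-CBC initialization vector

metadata = _generate_encryption_data_dict(kek, cek, iv)
encryption_data = _dict_to_encryption_data(metadata)

# XOR is its own inverse, so "wrapping" again recovers the cek.
assert kek.wrap_key(encryption_data.wrapped_content_key.encrypted_key) == cek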

@@ -0,0 +1,207 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from sys import version_info

if version_info < (3,):
    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')

        return str(value)
else:
    _str = str


def _to_str(value):
    return _str(value) if value is not None else None


from azure.common import (
    AzureHttpError,
    AzureConflictHttpError,
    AzureMissingResourceHttpError,
    AzureException,
)
from ._constants import (
    _ENCRYPTION_PROTOCOL_V1,
)

_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_STORAGE_MISSING_INFO = \
    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
    'The emulator does not support the file service.'
_ERROR_ACCESS_POLICY = \
    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
    'instance'
_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_START_END_NEEDED_FOR_MD5 = \
    'Both end_range and start_range need to be specified ' + \
    'for getting content MD5.'
_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
    'Getting content MD5 for a range greater than 4MB ' + \
    'is not supported.'
_ERROR_MD5_MISMATCH = \
    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
_ERROR_TOO_MANY_ACCESS_POLICIES = \
    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
_ERROR_OBJECT_INVALID = \
    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
    'Encryption version is not supported.'
_ERROR_DECRYPTION_FAILURE = \
    'Decryption failed'
_ERROR_ENCRYPTION_REQUIRED = \
    'Encryption required but no key was provided.'
_ERROR_DECRYPTION_REQUIRED = \
    'Decryption required but neither key nor resolver was provided.' + \
    ' If you do not want to decrypt, please do not set the require encryption flag.'
_ERROR_INVALID_KID = \
    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
    'Specified encryption algorithm is not supported.'
_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
                                           ' for this method.'
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
                            'Data was either not encrypted or metadata has been lost.'


def _dont_fail_on_exist(error):
    ''' don't throw exception if the resource exists.
    This is called by create_* APIs with fail_on_exist=False'''
    if isinstance(error, AzureConflictHttpError):
        return False
    else:
        raise error


def _dont_fail_not_exist(error):
    ''' don't throw exception if the resource doesn't exist.
    This is called by delete_* APIs with fail_not_exist=False'''
    if isinstance(error, AzureMissingResourceHttpError):
        return False
    else:
        raise error


def _http_error_handler(http_error):
    ''' Simple error handler for azure.'''
    message = str(http_error)
    error_code = None

    if 'x-ms-error-code' in http_error.respheader:
        error_code = http_error.respheader['x-ms-error-code']
        message += ' ErrorCode: ' + error_code

    if http_error.respbody is not None:
        message += '\n' + http_error.respbody.decode('utf-8-sig')

    ex = AzureHttpError(message, http_error.status)
    ex.error_code = error_code

    raise ex


def _validate_type_bytes(param_name, param):
    if not isinstance(param, bytes):
        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))


def _validate_type_bytes_or_stream(param_name, param):
    if not (isinstance(param, bytes) or hasattr(param, 'read')):
        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))


def _validate_not_none(param_name, param):
    if param is None:
        raise ValueError(_ERROR_VALUE_NONE.format(param_name))


def _validate_content_match(server_md5, computed_md5):
    if server_md5 != computed_md5:
        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))


def _validate_access_policies(identifiers):
    if identifiers and len(identifiers) > 5:
        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)


def _validate_key_encryption_key_wrap(kek):
    # Note that None is not callable and so will fail the second clause of each check.
    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))


def _validate_key_encryption_key_unwrap(kek):
    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))


def _validate_encryption_required(require_encryption, kek):
    if require_encryption and (kek is None):
        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)


def _validate_decryption_required(require_encryption, kek, resolver):
    if (require_encryption and (kek is None) and
            (resolver is None)):
        raise ValueError(_ERROR_DECRYPTION_REQUIRED)


def _validate_encryption_protocol_version(encryption_protocol):
    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)


def _validate_kek_id(kid, resolved_id):
    if not (kid == resolved_id):
        raise ValueError(_ERROR_INVALID_KID)


def _validate_encryption_unsupported(require_encryption, key_encryption_key):
    if require_encryption or (key_encryption_key is not None):
        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)


# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
    msg = ""
    if len(ex.args) > 0:
        msg = ex.args[0]
    if version_info >= (3,):
        # Automatic chaining in Python 3 means we keep the trace
        return desired_type(msg)
    else:
        # There isn't a good solution in 2 for keeping the stack trace
        # in general, or that will not result in an error in 3
        # However, we can keep the previous error type and message
        # TODO: In the future we will log the trace
        return desired_type('{}: {}'.format(ex.__class__.__name__, msg))


class AzureSigningError(AzureException):
    """
    Represents a fatal error when attempting to sign a request.
    In general, the cause of this exception is user error. For example, the given account key is not valid.
    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
    """
    pass
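
A short illustration of _wrap_exception: the wrapped error keeps the original message (and, when re-raised inside an except block on Python 3, the implicit exception chain):

try:
    raise KeyError('account key not found')
except Exception as ex:
    wrapped = _wrap_exception(ex, AzureSigningError)

assert isinstance(wrapped, AzureSigningError)
print(wrapped)  # account key not found (on Python 3)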

@@ -0,0 +1,74 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------


class HTTPError(Exception):
    '''
    Represents an HTTP Exception when response status code >= 300.

    :ivar int status:
        the status code of the response
    :ivar str message:
        the message
    :ivar dict respheader:
        the returned headers
    :ivar bytes respbody:
        the body of the response
    '''

    def __init__(self, status, message, respheader, respbody):
        self.status = status
        self.respheader = respheader
        self.respbody = respbody
        Exception.__init__(self, message)


class HTTPResponse(object):
    '''
    Represents a response from an HTTP request.

    :ivar int status:
        the status code of the response
    :ivar str message:
        the message
    :ivar dict headers:
        the returned headers
    :ivar bytes body:
        the body of the response
    '''

    def __init__(self, status, message, headers, body):
        self.status = status
        self.message = message
        self.headers = headers
        self.body = body


class HTTPRequest(object):
    '''
    Represents an HTTP Request.

    :ivar str host:
        the host name to connect to
    :ivar str method:
        the method to use to connect (string such as GET, POST, PUT, etc.)
    :ivar str path:
        the uri fragment
    :ivar dict query:
        query parameters
    :ivar dict headers:
        header values
    :ivar bytes body:
        the body of the request.
    '''

    def __init__(self):
        self.host = ''
        self.method = ''
        self.path = ''
        self.query = {}    # dict of query parameter name -> value
        self.headers = {}  # dict of header name -> value
        self.body = ''
@ -0,0 +1,107 @@
|
|||
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging

from . import HTTPResponse
from .._serialization import _get_data_bytes_or_stream_only

logger = logging.getLogger(__name__)


class _HTTPClient(object):
    '''
    Takes the request, sends it to the cloud service and returns the response.
    '''

    def __init__(self, protocol=None, session=None, timeout=None):
        '''
        :param str protocol:
            http or https.
        :param requests.Session session:
            session object created with requests library (or compatible).
        :param int timeout:
            timeout for the http request, in seconds.
        '''
        self.protocol = protocol
        self.session = session
        self.timeout = timeout

        # By default, requests adds an Accept:*/* and Accept-Encoding header to the
        # session, which causes issues with some Azure REST APIs. Removing them here
        # gives us the flexibility to add them back on a case-by-case basis.
        if 'Accept' in self.session.headers:
            del self.session.headers['Accept']

        if 'Accept-Encoding' in self.session.headers:
            del self.session.headers['Accept-Encoding']

        self.proxies = None

    def set_proxy(self, host, port, user, password):
        '''
        Sets the proxy server host and port for HTTP CONNECT tunnelling.

        Note that we set the proxies directly on the request later on rather than
        using the session object, as requests has a bug where the session proxy is
        ignored in favor of the environment proxy. So, auth will not work unless it
        is passed directly when making the request, as this overrides both.

        :param str host:
            Address of the proxy. Ex: '192.168.0.100'
        :param int port:
            Port of the proxy. Ex: 6000
        :param str user:
            User for proxy authorization.
        :param str password:
            Password for proxy authorization.
        '''
        if user and password:
            proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
        else:
            proxy_string = '{}:{}'.format(host, port)

        self.proxies = {'http': 'http://{}'.format(proxy_string),
                        'https': 'https://{}'.format(proxy_string)}

    def perform_request(self, request):
        '''
        Sends an HTTPRequest to Azure Storage and returns an HTTPResponse.

        :param HTTPRequest request:
            The request to serialize and send.
        :return: An HTTPResponse containing the parsed HTTP response.
        :rtype: :class:`~azure.cosmosdb.common._http.HTTPResponse`
        '''
        # Verify the body is either bytes or a file-like/stream object
        if request.body:
            request.body = _get_data_bytes_or_stream_only('request.body', request.body)

        # Construct the URI
        uri = self.protocol.lower() + '://' + request.host + request.path

        # Send the request
        response = self.session.request(request.method,
                                        uri,
                                        params=request.query,
                                        headers=request.headers,
                                        data=request.body or None,
                                        timeout=self.timeout,
                                        proxies=self.proxies)

        # Parse the response
        status = int(response.status_code)
        response_headers = {}
        for key, name in response.headers.items():
            # Preserve the case of metadata
            if key.lower().startswith('x-ms-meta-'):
                response_headers[key] = name
            else:
                response_headers[key.lower()] = name

        wrap = HTTPResponse(status, response.reason, response_headers, response.content)
        response.close()

        return wrap
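
A minimal usage sketch of the client above (it assumes requests is installed, that the module lives at azure.cosmosdb.common._http.httpclient, and uses httpbin.org purely as a stand-in host):

import requests

from azure.cosmosdb.common._http import HTTPRequest
from azure.cosmosdb.common._http.httpclient import _HTTPClient

client = _HTTPClient(protocol='https', session=requests.Session(), timeout=65)
# client.set_proxy('192.168.0.100', 6000, None, None)  # optional CONNECT proxy

request = HTTPRequest()
request.host = 'httpbin.org'  # placeholder host for this sketch
request.method = 'GET'
request.path = '/get'

response = client.perform_request(request)
print(response.status, response.headers.get('content-type'))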
@@ -0,0 +1,371 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import uuid
from datetime import date
from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
from os import fstat
from time import time
from wsgiref.handlers import format_date_time

from dateutil.tz import tzutc

if sys.version_info >= (3,):
    from urllib.parse import quote as url_quote
else:
    from urllib2 import quote as url_quote

try:
    from xml.etree import cElementTree as ETree
except ImportError:
    from xml.etree import ElementTree as ETree

from ._error import (
    _ERROR_VALUE_SHOULD_BE_BYTES,
    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
)
from .models import (
    _unicode_type,
)
from ._common_conversion import (
    _str,
)


def _to_utc_datetime(value):
    # Azure expects the date value passed in to be UTC.
    # Azure will always return values as UTC.
    # If a date is passed in without timezone info, it is assumed to be UTC.
    if value.tzinfo:
        value = value.astimezone(tzutc())
    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
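
For illustration, the conversion behaves like this (assuming the module is importable as azure.cosmosdb.common._serialization):

from datetime import datetime

from dateutil.tz import tzoffset

from azure.cosmosdb.common._serialization import _to_utc_datetime

# Naive datetimes are treated as already being UTC.
print(_to_utc_datetime(datetime(2018, 3, 28, 12, 0, 0)))  # 2018-03-28T12:00:00Z

# Aware datetimes are converted to UTC first (UTC+2 in this sketch).
aware = datetime(2018, 3, 28, 14, 0, 0, tzinfo=tzoffset(None, 7200))
print(_to_utc_datetime(aware))  # 2018-03-28T12:00:00Z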
def _update_request(request, x_ms_version, user_agent_string):
    # Verify body
    if request.body:
        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
        length = _len_plus(request.body)

        # The only scenario where length is None is if the stream object is not seekable.
        if length is None:
            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)

        # If it is PUT, POST, MERGE or DELETE, we need to add Content-Length to the headers.
        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
            request.headers['Content-Length'] = str(length)

    # Append additional headers based on the service
    request.headers['x-ms-version'] = x_ms_version
    request.headers['User-Agent'] = user_agent_string
    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())

    # If the host has a path component (ex local storage), move it
    path = request.host.split('/', 1)
    if len(path) == 2:
        request.host = path[0]
        request.path = '/{}{}'.format(path[1], request.path)

    # Encode and optionally add local storage prefix to path
    request.path = url_quote(request.path, '/()$=\',~')
def _add_metadata_headers(metadata, request):
    if metadata:
        if not request.headers:
            request.headers = {}
        for name, value in metadata.items():
            request.headers['x-ms-meta-' + name] = value
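
For example (illustrative, under the same azure.cosmosdb.common import-path assumption):

from azure.cosmosdb.common._http import HTTPRequest
from azure.cosmosdb.common._serialization import _add_metadata_headers

request = HTTPRequest()
_add_metadata_headers({'Category': 'test', 'owner': 'alice'}, request)
print(request.headers)
# {'x-ms-meta-Category': 'test', 'x-ms-meta-owner': 'alice'}

Note that the case of the metadata name is preserved, matching the case-preserving parse of x-ms-meta-* headers in perform_request above.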
def _add_date_header(request):
    current_time = format_date_time(time())
    request.headers['x-ms-date'] = current_time


def _get_data_bytes_only(param_name, param_value):
    '''Validates the request body passed in and converts it to bytes
    if our policy allows it.'''
    if param_value is None:
        return b''

    if isinstance(param_value, bytes):
        return param_value

    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_data_bytes_or_stream_only(param_name, param_value):
    '''Validates that the request body passed in is a stream/file-like or bytes
    object.'''
    if param_value is None:
        return b''

    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
        return param_value

    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
def _get_request_body(request_body):
    '''Converts an object into a request body. If it's None
    we'll return an empty bytes object; if it's bytes or a stream it is
    used directly; if it's text it is encoded as UTF-8. Otherwise the
    object is converted to a string first.'''
    if request_body is None:
        return b''

    if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
        return request_body

    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')

    request_body = str(request_body)
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')

    return request_body
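
The validation helpers behave as follows (a small sketch under the same import-path assumption):

from io import BytesIO

from azure.cosmosdb.common._serialization import (
    _get_data_bytes_or_stream_only,
    _get_request_body,
)

assert _get_request_body(None) == b''
assert _get_request_body(u'caf\xe9') == b'caf\xc3\xa9'  # text is UTF-8 encoded

stream = BytesIO(b'payload')
assert _get_data_bytes_or_stream_only('body', stream) is stream  # streams pass through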
def _convert_signed_identifiers_to_xml(signed_identifiers):
    if signed_identifiers is None:
        return ''

    sis = ETree.Element('SignedIdentifiers')
    for id, access_policy in signed_identifiers.items():
        # Root signed identifiers element
        si = ETree.SubElement(sis, 'SignedIdentifier')

        # Id element
        ETree.SubElement(si, 'Id').text = id

        # Access policy element
        policy = ETree.SubElement(si, 'AccessPolicy')

        if access_policy.start:
            start = access_policy.start
            if isinstance(access_policy.start, date):
                start = _to_utc_datetime(start)
            ETree.SubElement(policy, 'Start').text = start

        if access_policy.expiry:
            expiry = access_policy.expiry
            if isinstance(access_policy.expiry, date):
                expiry = _to_utc_datetime(expiry)
            ETree.SubElement(policy, 'Expiry').text = expiry

        if access_policy.permission:
            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)

    # Add xml declaration and serialize
    stream = BytesIO()
    try:
        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
    finally:
        output = stream.getvalue()
        stream.close()

    return output
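
Here is a sketch of the XML this produces for a single stored access policy (output shape approximate; AccessPolicy comes from the models module in this same change):

from azure.cosmosdb.common.models import AccessPolicy
from azure.cosmosdb.common._serialization import _convert_signed_identifiers_to_xml

policy = AccessPolicy(permission='r',
                      expiry='2018-12-31T00:00:00Z',
                      start='2018-01-01T00:00:00Z')
xml = _convert_signed_identifiers_to_xml({'read-only-policy': policy})
print(xml.decode('utf-8'))
# <?xml version='1.0' encoding='utf-8'?>
# <SignedIdentifiers><SignedIdentifier><Id>read-only-policy</Id><AccessPolicy>
# <Start>2018-01-01T00:00:00Z</Start><Expiry>2018-12-31T00:00:00Z</Expiry>
# <Permission>r</Permission></AccessPolicy></SignedIdentifier></SignedIdentifiers>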
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
                                       cors, target_version=None, delete_retention_policy=None, static_website=None):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
        <Logging>
            <Version>version-number</Version>
            <Delete>true|false</Delete>
            <Read>true|false</Read>
            <Write>true|false</Write>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </Logging>
        <HourMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </HourMetrics>
        <MinuteMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </MinuteMetrics>
        <Cors>
            <CorsRule>
                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
                <AllowedMethods>comma-separated-list-of-HTTP-verbs</AllowedMethods>
                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
            </CorsRule>
        </Cors>
        <DeleteRetentionPolicy>
            <Enabled>true|false</Enabled>
            <Days>number-of-days</Days>
        </DeleteRetentionPolicy>
        <StaticWebsite>
            <Enabled>true|false</Enabled>
            <IndexDocument></IndexDocument>
            <ErrorDocument404Path></ErrorDocument404Path>
        </StaticWebsite>
    </StorageServiceProperties>
    '''
    service_properties_element = ETree.Element('StorageServiceProperties')

    # Logging
    if logging:
        logging_element = ETree.SubElement(service_properties_element, 'Logging')
        ETree.SubElement(logging_element, 'Version').text = logging.version
        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
        ETree.SubElement(logging_element, 'Write').text = str(logging.write)

        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)

    # HourMetrics
    if hour_metrics:
        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)

    # MinuteMetrics
    if minute_metrics:
        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)

    # CORS
    # Make sure to still serialize an empty list
    if cors is not None:
        cors_element = ETree.SubElement(service_properties_element, 'Cors')
        for rule in cors:
            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)

    # Target version
    if target_version:
        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version

    # DeleteRetentionPolicy
    if delete_retention_policy:
        policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
        ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)

        if delete_retention_policy.enabled:
            ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)

    # StaticWebsite
    if static_website:
        static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
        ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)

        if static_website.enabled:
            if static_website.index_document is not None:
                ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)

            if static_website.error_document_404_path is not None:
                ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
                    str(static_website.error_document_404_path)

    # Add xml declaration and serialize
    stream = BytesIO()
    try:
        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
                                                            method='xml')
    finally:
        output = stream.getvalue()
        stream.close()

    return output
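
A sketch of driving this serializer with the model classes from this change (the objects mirror the XML template above):

from azure.cosmosdb.common.models import CorsRule, Logging, Metrics, RetentionPolicy
from azure.cosmosdb.common._serialization import _convert_service_properties_to_xml

logging_props = Logging(delete=True, read=True, write=True,
                        retention_policy=RetentionPolicy(enabled=True, days=7))
hour_metrics = Metrics(enabled=True, include_apis=True)
cors = [CorsRule(['*'], ['GET', 'PUT'], max_age_in_seconds=500,
                 allowed_headers=['x-ms-meta-*'])]

xml = _convert_service_properties_to_xml(logging_props, hour_metrics, None, cors)
print(xml.decode('utf-8'))  # <StorageServiceProperties><Logging>...</Logging>...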
def _convert_metrics_to_xml(metrics, root):
    '''
    <Version>version-number</Version>
    <Enabled>true|false</Enabled>
    <IncludeAPIs>true|false</IncludeAPIs>
    <RetentionPolicy>
        <Enabled>true|false</Enabled>
        <Days>number-of-days</Days>
    </RetentionPolicy>
    '''
    # Version
    ETree.SubElement(root, 'Version').text = metrics.version

    # Enabled
    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)

    # IncludeAPIs
    if metrics.enabled and metrics.include_apis is not None:
        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)

    # RetentionPolicy
    retention_element = ETree.SubElement(root, 'RetentionPolicy')
    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)


def _convert_retention_policy_to_xml(retention_policy, root):
    '''
    <Enabled>true|false</Enabled>
    <Days>number-of-days</Days>
    '''
    # Enabled
    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)

    # Days
    if retention_policy.enabled and retention_policy.days:
        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
def _len_plus(data):
    length = None
    # Check if the object implements the __len__ method; this covers most
    # input cases, such as bytes and bytearray.
    try:
        length = len(data)
    except TypeError:
        pass

    if not length:
        # Check if the stream is a file-like stream object.
        # If so, calculate the size using the file descriptor.
        try:
            fileno = data.fileno()
        except (AttributeError, UnsupportedOperation):
            pass
        else:
            return fstat(fileno).st_size

        # If the stream is seekable and tell() is implemented, calculate the stream size.
        try:
            current_position = data.tell()
            data.seek(0, SEEK_END)
            length = data.tell() - current_position
            data.seek(current_position, SEEK_SET)
        except (AttributeError, UnsupportedOperation):
            pass

    return length
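
Each branch above can be exercised directly (a sketch, same import-path assumption):

from io import BytesIO

from azure.cosmosdb.common._serialization import _len_plus

assert _len_plus(b'abcdef') == 6  # objects with __len__

stream = BytesIO(b'abcdef')
stream.seek(2)
assert _len_plus(stream) == 4  # seekable streams: remaining bytes from the current position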
@@ -0,0 +1,193 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

from ._error import _validate_not_none
from .models import (
    ResourceTypes,
    Services,
    AccountPermissions,
)
from .sharedaccesssignature import (
    SharedAccessSignature,
)


class CloudStorageAccount(object):
    """
    Provides a factory for creating the blob, queue, and file services
    with a common account name and account key or sas token. Users can either
    use the factory or can construct the appropriate service directly.
    """

    def __init__(self, account_name=None, account_key=None, sas_token=None,
                 is_emulated=None, endpoint_suffix=None):
        '''
        :param str account_name:
            The storage account name. This is used to authenticate requests
            signed with an account key and to construct the storage endpoint. It
            is required unless is_emulated is used.
        :param str account_key:
            The storage account key. This is used for shared key authentication.
        :param str sas_token:
            A shared access signature token to use to authenticate requests
            instead of the account key. If account key and sas token are both
            specified, account key will be used to sign.
        :param bool is_emulated:
            Whether to use the emulator. Defaults to False. If specified, will
            override all other parameters.
        :param str endpoint_suffix:
            The host base component of the url, minus the account name. Defaults
            to Azure (core.windows.net). Override this to use a sovereign cloud.
        '''
        self.account_name = account_name
        self.account_key = account_key
        self.sas_token = sas_token
        self.is_emulated = is_emulated
        self.endpoint_suffix = endpoint_suffix
    def create_block_blob_service(self):
        '''
        Creates a BlockBlobService object with the settings specified in the
        CloudStorageAccount.

        :return: A service object.
        :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
        '''
        try:
            from azure.storage.blob.blockblobservice import BlockBlobService
            return BlockBlobService(self.account_name, self.account_key,
                                    sas_token=self.sas_token,
                                    is_emulated=self.is_emulated,
                                    endpoint_suffix=self.endpoint_suffix)
        except ImportError:
            raise Exception('The package azure-storage-blob is required. '
                            + 'Please install it using "pip install azure-storage-blob"')

    def create_page_blob_service(self):
        '''
        Creates a PageBlobService object with the settings specified in the
        CloudStorageAccount.

        :return: A service object.
        :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
        '''
        try:
            from azure.storage.blob.pageblobservice import PageBlobService
            return PageBlobService(self.account_name, self.account_key,
                                   sas_token=self.sas_token,
                                   is_emulated=self.is_emulated,
                                   endpoint_suffix=self.endpoint_suffix)
        except ImportError:
            raise Exception('The package azure-storage-blob is required. '
                            + 'Please install it using "pip install azure-storage-blob"')

    def create_append_blob_service(self):
        '''
        Creates an AppendBlobService object with the settings specified in the
        CloudStorageAccount.

        :return: A service object.
        :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
        '''
        try:
            from azure.storage.blob.appendblobservice import AppendBlobService
            return AppendBlobService(self.account_name, self.account_key,
                                     sas_token=self.sas_token,
                                     is_emulated=self.is_emulated,
                                     endpoint_suffix=self.endpoint_suffix)
        except ImportError:
            raise Exception('The package azure-storage-blob is required. '
                            + 'Please install it using "pip install azure-storage-blob"')

    def create_queue_service(self):
        '''
        Creates a QueueService object with the settings specified in the
        CloudStorageAccount.

        :return: A service object.
        :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
        '''
        try:
            from azure.storage.queue.queueservice import QueueService
            return QueueService(self.account_name, self.account_key,
                                sas_token=self.sas_token,
                                is_emulated=self.is_emulated,
                                endpoint_suffix=self.endpoint_suffix)
        except ImportError:
            raise Exception('The package azure-storage-queue is required. '
                            + 'Please install it using "pip install azure-storage-queue"')
    def create_file_service(self):
        '''
        Creates a FileService object with the settings specified in the
        CloudStorageAccount.

        :return: A service object.
        :rtype: :class:`~azure.storage.file.fileservice.FileService`
        '''
        try:
            from azure.storage.file.fileservice import FileService
            return FileService(self.account_name, self.account_key,
                               sas_token=self.sas_token,
                               endpoint_suffix=self.endpoint_suffix)
        except ImportError:
            raise Exception('The package azure-storage-file is required. '
                            + 'Please install it using "pip install azure-storage-file"')

    def generate_shared_access_signature(self, services, resource_types,
                                         permission, expiry, start=None,
                                         ip=None, protocol=None):
        '''
        Generates a shared access signature for the account.
        Use the returned signature with the sas_token parameter of the service
        or to create a new account object.

        :param Services services:
            Specifies the services accessible with the account SAS. You can
            combine values to provide access to more than one service.
        :param ResourceTypes resource_types:
            Specifies the resource types that are accessible with the account
            SAS. You can combine values to provide access to more than one
            resource type.
        :param AccountPermissions permission:
            The permissions associated with the shared access signature. The
            user is restricted to operations allowed by the permissions.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has been
            specified in an associated stored access policy. You can combine
            values to provide more than one permission.
        :param expiry:
            The time at which the shared access signature becomes invalid.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has
            been specified in an associated stored access policy. Azure will always
            convert values to UTC. If a date is passed in without timezone info, it
            is assumed to be UTC.
        :type expiry: datetime or str
        :param start:
            The time at which the shared access signature becomes valid. If
            omitted, start time for this call is assumed to be the time when the
            storage service receives the request. Azure will always convert values
            to UTC. If a date is passed in without timezone info, it is assumed to
            be UTC.
        :type start: datetime or str
        :param str ip:
            Specifies an IP address or a range of IP addresses from which to accept requests.
            If the IP address from which the request originates does not match the IP address
            or address range specified on the SAS token, the request is not authenticated.
            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
            restricts the request to those IP addresses.
        :param str protocol:
            Specifies the protocol permitted for a request made with the SAS. Possible
            values are both HTTPS and HTTP (https,http) or HTTPS only (https). The
            default value is https,http. Note that HTTP only is not a permitted value.
        '''
        _validate_not_none('self.account_name', self.account_name)
        _validate_not_none('self.account_key', self.account_key)

        sas = SharedAccessSignature(self.account_name, self.account_key)
        return sas.generate_account(services, resource_types, permission,
                                    expiry, start=start, ip=ip, protocol=protocol)
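
End-to-end, the factory is used like this (a sketch; the account name and the base64 key below are placeholders):

from azure.cosmosdb.common.cloudstorageaccount import CloudStorageAccount
from azure.cosmosdb.common.models import AccountPermissions, ResourceTypes, Services

account = CloudStorageAccount(account_name='myaccount',
                              account_key='bXlhY2NvdW50a2V5')  # placeholder key

sas_token = account.generate_shared_access_signature(
    services=Services.TABLE,
    resource_types=ResourceTypes.OBJECT,
    permission=AccountPermissions.READ,
    expiry='2018-12-31T00:00:00Z')

# The token can then stand in for the key, here with the optional queue package:
# queue_service = CloudStorageAccount('myaccount', sas_token=sas_token).create_queue_service()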
@@ -0,0 +1,672 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys

if sys.version_info < (3,):
    from collections import Iterable

    _unicode_type = unicode
else:
    from collections.abc import Iterable

    _unicode_type = str

from ._error import (
    _validate_not_none
)


class _HeaderDict(dict):
    def __getitem__(self, index):
        return super(_HeaderDict, self).__getitem__(index.lower())


class _list(list):
    '''Used so that additional properties can be set on the return list'''
    pass


class _dict(dict):
    '''Used so that additional properties can be set on the return dictionary'''
    pass
class _OperationContext(object):
    '''
    Contains information that lasts the lifetime of an operation. This operation
    may span multiple calls to the Azure service.

    :ivar bool location_lock:
        Whether the location should be locked for this operation.
    :ivar str host_location:
        The location to lock to.
    '''

    def __init__(self, location_lock=False):
        self.location_lock = location_lock
        self.host_location = None
class ListGenerator(Iterable):
    '''
    A generator object used to list storage resources. The generator will lazily
    follow the continuation tokens returned by the service and stop when all
    resources have been returned or max_results is reached.

    If max_results is specified and the account has more than that number of
    resources, the generator will have a populated next_marker field once it
    finishes. This marker can be used to create a new generator if more
    results are desired.
    '''

    def __init__(self, resources, list_method, list_args, list_kwargs):
        self.items = resources
        self.next_marker = resources.next_marker

        self._list_method = list_method
        self._list_args = list_args
        self._list_kwargs = list_kwargs

    def __iter__(self):
        # return results
        for i in self.items:
            yield i

        while True:
            # if no more results on the service, return
            if not self.next_marker:
                break

            # update the marker args
            self._list_kwargs['marker'] = self.next_marker

            # handle max results, if present
            max_results = self._list_kwargs.get('max_results')
            if max_results is not None:
                max_results = max_results - len(self.items)

                # if we've reached max_results, return
                # else, update the max_results arg
                if max_results <= 0:
                    break
                else:
                    self._list_kwargs['max_results'] = max_results

            # get the next segment
            resources = self._list_method(*self._list_args, **self._list_kwargs)
            self.items = resources
            self.next_marker = resources.next_marker

            # return results
            for i in self.items:
                yield i
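
A sketch of the lazy paging with a stand-in list method (fake_list is hypothetical; _list just allows a next_marker attribute on the returned segment):

from azure.cosmosdb.common.models import ListGenerator, _list

def fake_list(marker=None, max_results=None):
    # Hypothetical paged list method returning two segments.
    segment = _list(['a', 'b'] if not marker else ['c'])
    segment.next_marker = 'page2' if not marker else None
    return segment

first = fake_list()
items = list(ListGenerator(first, fake_list, (), {'marker': None}))
print(items)  # ['a', 'b', 'c']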
class RetryContext(object):
    '''
    Contains the request and response information that can be used to determine
    whether and how to retry. This context is stored across retries and may be
    used to store other information relevant to the retry strategy.

    :ivar ~azure.cosmosdb.common._http.HTTPRequest request:
        The request sent to the storage service.
    :ivar ~azure.cosmosdb.common._http.HTTPResponse response:
        The response returned by the storage service.
    :ivar LocationMode location_mode:
        The location the request was sent to.
    :ivar Exception exception:
        The exception that just occurred. The type could either be AzureException (for HTTP errors),
        or other Exception types from lower layers, which are kept unwrapped for easier processing.
    :ivar bool is_emulated:
        Whether retry is targeting the emulator. The default value is False.
    :ivar int body_position:
        The initial position of the body stream. It is useful when retries happen and we need to rewind the stream.
    '''

    def __init__(self):
        self.request = None
        self.response = None
        self.location_mode = None
        self.exception = None
        self.is_emulated = False
        self.body_position = None
class LocationMode(object):
    '''
    Specifies the location the request should be sent to. This mode only applies
    for RA-GRS accounts which allow secondary read access. All other account types
    must use PRIMARY.
    '''

    PRIMARY = 'primary'
    ''' Requests should be sent to the primary location. '''

    SECONDARY = 'secondary'
    ''' Requests should be sent to the secondary location, if possible. '''
class RetentionPolicy(object):
    '''
    By default, Storage Analytics will not delete any logging or metrics data. Blobs
    will continue to be written until the shared 20TB limit is
    reached. Once the 20TB limit is reached, Storage Analytics will stop writing
    new data and will not resume until free space is available. This 20TB limit
    is independent of the total limit for your storage account.

    There are two ways to delete Storage Analytics data: by manually making deletion
    requests or by setting a data retention policy. Manual requests to delete Storage
    Analytics data are billable, but delete requests resulting from a retention policy
    are not billable.
    '''

    def __init__(self, enabled=False, days=None):
        '''
        :param bool enabled:
            Indicates whether a retention policy is enabled for the
            storage service. If disabled, logging and metrics data will be retained
            infinitely by the service unless explicitly deleted.
        :param int days:
            Required if enabled is true. Indicates the number of
            days that metrics or logging data should be retained. All data older
            than this value will be deleted. The minimum value you can specify is 1;
            the largest value is 365 (one year).
        '''
        _validate_not_none("enabled", enabled)
        if enabled:
            _validate_not_none("days", days)

        self.enabled = enabled
        self.days = days
class Logging(object):
    '''
    Storage Analytics logs detailed information about successful and failed requests
    to a storage service. This information can be used to monitor individual requests
    and to diagnose issues with a storage service. Requests are logged on a best-effort
    basis.

    All logs are stored in block blobs in a container named $logs, which is
    automatically created when Storage Analytics is enabled for a storage account.
    The $logs container is located in the blob namespace of the storage account.
    This container cannot be deleted once Storage Analytics has been enabled, though
    its contents can be deleted.

    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
    '''

    def __init__(self, delete=False, read=False, write=False,
                 retention_policy=None):
        '''
        :param bool delete:
            Indicates whether all delete requests should be logged.
        :param bool read:
            Indicates whether all read requests should be logged.
        :param bool write:
            Indicates whether all write requests should be logged.
        :param RetentionPolicy retention_policy:
            The retention policy for the logging data.
        '''
        _validate_not_none("read", read)
        _validate_not_none("write", write)
        _validate_not_none("delete", delete)

        self.version = u'1.0'
        self.delete = delete
        self.read = read
        self.write = write
        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
class Metrics(object):
    '''
    Metrics include aggregated transaction statistics and capacity data about requests
    to a storage service. Transactions are reported at both the API operation level
    as well as at the storage service level, and capacity is reported at the storage
    service level. Metrics data can be used to analyze storage service usage, diagnose
    issues with requests made against the storage service, and to improve the
    performance of applications that use a service.

    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
    '''

    def __init__(self, enabled=False, include_apis=None,
                 retention_policy=None):
        '''
        :param bool enabled:
            Indicates whether metrics are enabled for
            the service.
        :param bool include_apis:
            Required if enabled is True. Indicates whether metrics
            should generate summary statistics for called API operations.
        :param RetentionPolicy retention_policy:
            The retention policy for the metrics.
        '''
        _validate_not_none("enabled", enabled)
        if enabled:
            _validate_not_none("include_apis", include_apis)

        self.version = u'1.0'
        self.enabled = enabled
        self.include_apis = include_apis
        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
class CorsRule(object):
    '''
    CORS is an HTTP feature that enables a web application running under one domain
    to access resources in another domain. Web browsers implement a security
    restriction known as same-origin policy that prevents a web page from calling
    APIs in a different domain; CORS provides a secure way to allow one domain
    (the origin domain) to call APIs in another domain.

    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
    '''

    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
                 exposed_headers=None, allowed_headers=None):
        '''
        :param allowed_origins:
            A list of origin domains that will be allowed via CORS, or "*" to allow
            all domains. The list must contain at least one entry. Limited to 64
            origin domains. Each allowed origin can have up to 256 characters.
        :type allowed_origins: list(str)
        :param allowed_methods:
            A list of HTTP methods that are allowed to be executed by the origin.
            The list must contain at least one entry. For Azure Storage,
            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
        :type allowed_methods: list(str)
        :param int max_age_in_seconds:
            The number of seconds that the client/browser should cache a
            preflight response.
        :param exposed_headers:
            Defaults to an empty list. A list of response headers to expose to CORS
            clients. Limited to 64 defined headers and two prefixed headers. Each
            header can be up to 256 characters.
        :type exposed_headers: list(str)
        :param allowed_headers:
            Defaults to an empty list. A list of headers allowed to be part of
            the cross-origin request. Limited to 64 defined headers and 2 prefixed
            headers. Each header can be up to 256 characters.
        :type allowed_headers: list(str)
        '''
        _validate_not_none("allowed_origins", allowed_origins)
        _validate_not_none("allowed_methods", allowed_methods)
        _validate_not_none("max_age_in_seconds", max_age_in_seconds)

        self.allowed_origins = allowed_origins if allowed_origins else list()
        self.allowed_methods = allowed_methods if allowed_methods else list()
        self.max_age_in_seconds = max_age_in_seconds
        self.exposed_headers = exposed_headers if exposed_headers else list()
        self.allowed_headers = allowed_headers if allowed_headers else list()
class DeleteRetentionPolicy(object):
    '''
    To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later.
    This class groups the settings related to delete retention policy.
    '''

    def __init__(self, enabled=False, days=None):
        '''
        :param bool enabled:
            Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by the delete operation.
        :param int days:
            Required only if Enabled is true. Indicates the number of days that a deleted blob is retained.
            All data older than this value will be permanently deleted.
            The minimum value you can specify is 1; the largest value is 365.
        '''
        _validate_not_none("enabled", enabled)
        if enabled:
            _validate_not_none("days", days)

        self.enabled = enabled
        self.days = days
class StaticWebsite(object):
    '''
    Class representing the service properties pertaining to static websites.
    To set StaticWebsite, you must call Set Blob Service Properties using version 2018-03-28 or later.
    '''

    def __init__(self, enabled=False, index_document=None, error_document_404_path=None):
        '''
        :param bool enabled:
            Required. True if static websites should be enabled on the blob service for the corresponding Storage Account.
        :param str index_document:
            Represents the name of the index document. This is commonly "index.html".
        :param str error_document_404_path:
            Represents the path to the error document that should be shown when an error 404 is issued,
            in other words, when a browser requests a page that does not exist.
        '''
        _validate_not_none("enabled", enabled)

        self.enabled = enabled
        self.index_document = index_document
        self.error_document_404_path = error_document_404_path
class ServiceProperties(object):
    '''
    Returned by get_*_service_properties functions. Contains the properties of a
    storage service, including Analytics and CORS rules.

    Azure Storage Analytics performs logging and provides metrics data for a storage
    account. You can use this data to trace requests, analyze usage trends, and
    diagnose issues with your storage account. To use Storage Analytics, you must
    enable it individually for each service you want to monitor.

    The aggregated data is stored in a well-known blob (for logging) and in well-known
    tables (for metrics), which may be accessed using the Blob service and Table
    service APIs.

    For an in-depth guide on using Storage Analytics and other tools to identify,
    diagnose, and troubleshoot Azure Storage-related issues, see
    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/

    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
    '''

    pass
class ServiceStats(object):
    '''
    Returned by get_*_service_stats functions. Contains statistics related to
    replication for the given service. It is only available when read-access
    geo-redundant replication is enabled for the storage account.

    :ivar GeoReplication geo_replication:
        An object containing statistics related to replication for the given service.
    '''
    pass
class GeoReplication(object):
    '''
    Contains statistics related to replication for the given service.

    :ivar str status:
        The status of the secondary location. Possible values are:
            live: Indicates that the secondary location is active and operational.
            bootstrap: Indicates initial synchronization from the primary location
                to the secondary location is in progress. This typically occurs
                when replication is first enabled.
            unavailable: Indicates that the secondary location is temporarily
                unavailable.
    :ivar date last_sync_time:
        A GMT date value, to the second. All primary writes preceding this value
        are guaranteed to be available for read operations at the secondary.
        Primary writes after this point in time may or may not be available for
        reads. The value may be empty if LastSyncTime is not available. This can
        happen if the replication status is bootstrap or unavailable. Although
        geo-replication is continuously enabled, the LastSyncTime result may
        reflect a cached value from the service that is refreshed every few minutes.
    '''
    pass
class AccessPolicy(object):
    '''
    Access Policy class used by the set and get acl methods in each service.

    A stored access policy can specify the start time, expiry time, and
    permissions for the Shared Access Signatures with which it's associated.
    Depending on how you want to control access to your resource, you can
    specify all of these parameters within the stored access policy, and omit
    them from the URL for the Shared Access Signature. Doing so permits you to
    modify the associated signature's behavior at any time, as well as to revoke
    it. Or you can specify one or more of the access policy parameters within
    the stored access policy, and the others on the URL. Finally, you can
    specify all of the parameters on the URL. In this case, you can use the
    stored access policy to revoke the signature, but not to modify its behavior.

    Together the Shared Access Signature and the stored access policy must
    include all fields required to authenticate the signature. If any required
    fields are missing, the request will fail. Likewise, if a field is specified
    both in the Shared Access Signature URL and in the stored access policy, the
    request will fail with status code 400 (Bad Request).
    '''

    def __init__(self, permission=None, expiry=None, start=None):
        '''
        :param str permission:
            The permissions associated with the shared access signature. The
            user is restricted to operations allowed by the permissions.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has been
            specified in an associated stored access policy.
        :param expiry:
            The time at which the shared access signature becomes invalid.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has
            been specified in an associated stored access policy. Azure will always
            convert values to UTC. If a date is passed in without timezone info, it
            is assumed to be UTC.
        :type expiry: datetime or str
        :param start:
            The time at which the shared access signature becomes valid. If
            omitted, start time for this call is assumed to be the time when the
            storage service receives the request. Azure will always convert values
            to UTC. If a date is passed in without timezone info, it is assumed to
            be UTC.
        :type start: datetime or str
        '''
        self.start = start
        self.expiry = expiry
        self.permission = permission
class Protocol(object):
    '''
    Specifies the protocol permitted for a SAS token. Note that HTTP only is
    not allowed.
    '''

    HTTPS = 'https'
    ''' Allow HTTPS requests only. '''

    HTTPS_HTTP = 'https,http'
    ''' Allow HTTP and HTTPS requests. '''
class ResourceTypes(object):
    '''
    Specifies the resource types that are accessible with the account SAS.

    :ivar ResourceTypes ResourceTypes.CONTAINER:
        Access to container-level APIs (e.g., Create/Delete Container,
        Create/Delete Queue, Create/Delete Share,
        List Blobs/Files and Directories)
    :ivar ResourceTypes ResourceTypes.OBJECT:
        Access to object-level APIs for blobs, queue messages, and
        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
    :ivar ResourceTypes ResourceTypes.SERVICE:
        Access to service-level APIs (e.g., Get/Set Service Properties,
        Get Service Stats, List Containers/Queues/Shares)
    '''

    def __init__(self, service=False, container=False, object=False, _str=None):
        '''
        :param bool service:
            Access to service-level APIs (e.g., Get/Set Service Properties,
            Get Service Stats, List Containers/Queues/Shares)
        :param bool container:
            Access to container-level APIs (e.g., Create/Delete Container,
            Create/Delete Queue, Create/Delete Share,
            List Blobs/Files and Directories)
        :param bool object:
            Access to object-level APIs for blobs, queue messages, and
            files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
        :param str _str:
            A string representing the resource types.
        '''
        if not _str:
            _str = ''
        self.service = service or ('s' in _str)
        self.container = container or ('c' in _str)
        self.object = object or ('o' in _str)

    def __or__(self, other):
        return ResourceTypes(_str=str(self) + str(other))

    def __add__(self, other):
        return ResourceTypes(_str=str(self) + str(other))

    def __str__(self):
        return (('s' if self.service else '') +
                ('c' if self.container else '') +
                ('o' if self.object else ''))


ResourceTypes.SERVICE = ResourceTypes(service=True)
ResourceTypes.CONTAINER = ResourceTypes(container=True)
ResourceTypes.OBJECT = ResourceTypes(object=True)
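
The Services and AccountPermissions classes below share this same combine-and-stringify design, which maps directly onto the single-letter codes in the SAS query string. A small sketch:

from azure.cosmosdb.common.models import ResourceTypes

rt = ResourceTypes.SERVICE | ResourceTypes.OBJECT
print(str(rt))  # 'so'

rt = ResourceTypes(_str='sco')  # round-trips from the SAS string form
print(rt.service, rt.container, rt.object)  # True True True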
class Services(object):
    '''
    Specifies the services accessible with the account SAS.

    :ivar Services Services.BLOB: The blob service.
    :ivar Services Services.FILE: The file service.
    :ivar Services Services.QUEUE: The queue service.
    :ivar Services Services.TABLE: The table service.
    '''

    def __init__(self, blob=False, queue=False, file=False, table=False, _str=None):
        '''
        :param bool blob:
            Access to any blob service, for example, the `.BlockBlobService`
        :param bool queue:
            Access to the `.QueueService`
        :param bool file:
            Access to the `.FileService`
        :param bool table:
            Access to the TableService
        :param str _str:
            A string representing the services.
        '''
        if not _str:
            _str = ''
        self.blob = blob or ('b' in _str)
        self.queue = queue or ('q' in _str)
        self.file = file or ('f' in _str)
        self.table = table or ('t' in _str)

    def __or__(self, other):
        return Services(_str=str(self) + str(other))

    def __add__(self, other):
        return Services(_str=str(self) + str(other))

    def __str__(self):
        return (('b' if self.blob else '') +
                ('q' if self.queue else '') +
                ('t' if self.table else '') +
                ('f' if self.file else ''))


Services.BLOB = Services(blob=True)
Services.QUEUE = Services(queue=True)
Services.TABLE = Services(table=True)
Services.FILE = Services(file=True)
class AccountPermissions(object):
    '''
    :class:`~ResourceTypes` class to be used with generate_shared_access_signature
    method and for the AccessPolicies used with set_*_acl. There are two types of
    SAS which may be used to grant resource access. One is to grant access to a
    specific resource (resource-specific). Another is to grant access to the
    entire service for a specific account and allow certain operations based on
    the permissions found here.

    :ivar AccountPermissions AccountPermissions.ADD:
        Valid for the following Object resource types only: queue messages and append blobs.
    :ivar AccountPermissions AccountPermissions.CREATE:
        Valid for the following Object resource types only: blobs and files. Users
        can create new blobs or files, but may not overwrite existing blobs or files.
    :ivar AccountPermissions AccountPermissions.DELETE:
        Valid for Container and Object resource types, except for queue messages.
    :ivar AccountPermissions AccountPermissions.LIST:
        Valid for Service and Container resource types only.
    :ivar AccountPermissions AccountPermissions.PROCESS:
        Valid for the following Object resource type only: queue messages.
    :ivar AccountPermissions AccountPermissions.READ:
        Valid for all signed resource types (Service, Container, and Object).
        Permits read permissions to the specified resource type.
    :ivar AccountPermissions AccountPermissions.UPDATE:
        Valid for the following Object resource types only: queue messages.
    :ivar AccountPermissions AccountPermissions.WRITE:
        Valid for all signed resource types (Service, Container, and Object).
        Permits write permissions to the specified resource type.
    '''

    def __init__(self, read=False, write=False, delete=False, list=False,
                 add=False, create=False, update=False, process=False, _str=None):
        '''
        :param bool read:
            Valid for all signed resource types (Service, Container, and Object).
            Permits read permissions to the specified resource type.
        :param bool write:
            Valid for all signed resource types (Service, Container, and Object).
            Permits write permissions to the specified resource type.
        :param bool delete:
            Valid for Container and Object resource types, except for queue messages.
        :param bool list:
            Valid for Service and Container resource types only.
        :param bool add:
            Valid for the following Object resource types only: queue messages, and append blobs.
        :param bool create:
            Valid for the following Object resource types only: blobs and files.
            Users can create new blobs or files, but may not overwrite existing
            blobs or files.
        :param bool update:
            Valid for the following Object resource types only: queue messages.
        :param bool process:
            Valid for the following Object resource type only: queue messages.
        :param str _str:
            A string representing the permissions.
        '''
        if not _str:
            _str = ''
        self.read = read or ('r' in _str)
        self.write = write or ('w' in _str)
        self.delete = delete or ('d' in _str)
        self.list = list or ('l' in _str)
        self.add = add or ('a' in _str)
        self.create = create or ('c' in _str)
        self.update = update or ('u' in _str)
        self.process = process or ('p' in _str)

    def __or__(self, other):
        return AccountPermissions(_str=str(self) + str(other))

    def __add__(self, other):
        return AccountPermissions(_str=str(self) + str(other))

    def __str__(self):
        return (('r' if self.read else '') +
                ('w' if self.write else '') +
                ('d' if self.delete else '') +
                ('l' if self.list else '') +
                ('a' if self.add else '') +
                ('c' if self.create else '') +
                ('u' if self.update else '') +
                ('p' if self.process else ''))


AccountPermissions.READ = AccountPermissions(read=True)
AccountPermissions.WRITE = AccountPermissions(write=True)
AccountPermissions.DELETE = AccountPermissions(delete=True)
AccountPermissions.LIST = AccountPermissions(list=True)
AccountPermissions.ADD = AccountPermissions(add=True)
AccountPermissions.CREATE = AccountPermissions(create=True)
AccountPermissions.UPDATE = AccountPermissions(update=True)
AccountPermissions.PROCESS = AccountPermissions(process=True)
@@ -0,0 +1,306 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from abc import ABCMeta
from math import pow
import random
from io import (SEEK_SET, UnsupportedOperation)

from .models import LocationMode
from ._constants import (
    DEV_ACCOUNT_NAME,
    DEV_ACCOUNT_SECONDARY_NAME
)
class _Retry(object):
    '''
    The base class for Exponential and Linear retries containing shared code.
    '''
    __metaclass__ = ABCMeta

    def __init__(self, max_attempts, retry_to_secondary):
        '''
        Constructs a base retry object.

        :param int max_attempts:
            The maximum number of retry attempts.
        :param bool retry_to_secondary:
            Whether the request should be retried to secondary, if able. This should
            only be enabled if RA-GRS accounts are used and potentially stale data
            can be handled.
        '''
        self.max_attempts = max_attempts
        self.retry_to_secondary = retry_to_secondary

    def _should_retry(self, context):
        '''
        A function which determines whether or not to retry.

        :param ~azure.storage.models.RetryContext context:
            The retry context. This contains the request, response, and other data
            which can be used to determine whether or not to retry.
        :return:
            A boolean indicating whether or not to retry the request.
        :rtype: bool
        '''
        # If max attempts are reached, do not retry.
        if context.count >= self.max_attempts:
            return False

        status = None
        if context.response and context.response.status:
            status = context.response.status

        if status is None:
            '''
            If status is None, retry as this request triggered an exception. For
            example, network issues would trigger this.
            '''
            return True
        elif 200 <= status < 300:
            '''
            This method is called after a successful response, meaning we failed
            during the response body download or parsing. So, success codes should
            be retried.
            '''
            return True
        elif 300 <= status < 500:
            '''
            An exception occurred, but in most cases it was expected. Examples could
            include a 409 Conflict or 412 Precondition Failed.
            '''
            if status == 404 and context.location_mode == LocationMode.SECONDARY:
                # Response code 404 should be retried if secondary was used.
                return True
            if status == 408:
                # Response code 408 is a timeout and should be retried.
                return True
            return False
        elif status >= 500:
            '''
            Response codes above 500 with the exception of 501 Not Implemented and
            505 Version Not Supported indicate a server issue and should be retried.
            '''
            if status == 501 or status == 505:
                return False
            return True
        else:
            # If something else happened, it's unexpected. Retry.
            return True
    def _set_next_host_location(self, context):
        '''
        A function which sets the next host location on the request, if applicable.

        :param ~azure.storage.models.RetryContext context:
            The retry context containing the previous host location and the request
            to evaluate and possibly modify.
        '''
        if len(context.request.host_locations) > 1:
            # If there's more than one possible location, retry to the alternative
            if context.location_mode == LocationMode.PRIMARY:
                context.location_mode = LocationMode.SECONDARY

                # if targeting the emulator (with path style), change path instead of host
                if context.is_emulated:
                    # replace the first instance of primary account name with the secondary account name
                    context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1)
                else:
                    context.request.host = context.request.host_locations.get(context.location_mode)
            else:
                context.location_mode = LocationMode.PRIMARY

                # if targeting the emulator (with path style), change path instead of host
                if context.is_emulated:
                    # replace the first instance of secondary account name with the primary account name
                    context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
                else:
                    context.request.host = context.request.host_locations.get(context.location_mode)
def _retry(self, context, backoff):
|
||||
'''
|
||||
A function which determines whether and how to retry.
|
||||
|
||||
:param ~azure.storage.models.RetryContext context:
|
||||
The retry context. This contains the request, response, and other data
|
||||
which can be used to determine whether or not to retry.
|
||||
:param function() backoff:
|
||||
A function which returns the backoff time if a retry is to be performed.
|
||||
:return:
|
||||
An integer indicating how long to wait before retrying the request,
|
||||
or None to indicate no retry should be performed.
|
||||
:rtype: int or None
|
||||
'''
|
||||
# If the context does not contain a count parameter, this request has not
|
||||
# been retried yet. Add the count parameter to track the number of retries.
|
||||
if not hasattr(context, 'count'):
|
||||
context.count = 0
|
||||
|
||||
# Determine whether to retry, and if so increment the count, modify the
|
||||
# request as desired, and return the backoff.
|
||||
if self._should_retry(context):
|
||||
backoff_interval = backoff(context)
|
||||
context.count += 1
|
||||
|
||||
# If retry to secondary is enabled, attempt to change the host if the
|
||||
# request allows it
|
||||
if self.retry_to_secondary:
|
||||
self._set_next_host_location(context)
|
||||
|
||||
# rewind the request body if it is a stream
|
||||
if hasattr(context.request, 'body') and hasattr(context.request.body, 'read'):
|
||||
# no position was saved, then retry would not work
|
||||
if context.body_position is None:
|
||||
return None
|
||||
else:
|
||||
try:
|
||||
# attempt to rewind the body to the initial position
|
||||
context.request.body.seek(context.body_position, SEEK_SET)
|
||||
except UnsupportedOperation:
|
||||
# if body is not seekable, then retry would not work
|
||||
return None
|
||||
|
||||
return backoff_interval
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class ExponentialRetry(_Retry):
|
||||
'''
|
||||
Exponential retry.
|
||||
'''
|
||||
|
||||
def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3,
|
||||
retry_to_secondary=False, random_jitter_range=3):
|
||||
'''
|
||||
Constructs an Exponential retry object. The initial_backoff is used for
|
||||
the first retry. Subsequent retries are retried after initial_backoff +
|
||||
increment_power^retry_count seconds. For example, by default the first retry
|
||||
occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
|
||||
third after (15+3^2) = 24 seconds.
|
||||
|
||||
:param int initial_backoff:
|
||||
The initial backoff interval, in seconds, for the first retry.
|
||||
:param int increment_base:
|
||||
The base, in seconds, to increment the initial_backoff by after the
|
||||
first retry.
|
||||
:param int max_attempts:
|
||||
The maximum number of retry attempts.
|
||||
:param bool retry_to_secondary:
|
||||
Whether the request should be retried to secondary, if able. This should
|
||||
only be enabled of RA-GRS accounts are used and potentially stale data
|
||||
can be handled.
|
||||
:param int random_jitter_range:
|
||||
A number in seconds which indicates a range to jitter/randomize for the back-off interval.
|
||||
For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
|
||||
'''
|
||||
self.initial_backoff = initial_backoff
|
||||
self.increment_base = increment_base
|
||||
self.random_jitter_range = random_jitter_range
|
||||
super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
|
||||
|
||||
'''
|
||||
A function which determines whether and how to retry.
|
||||
|
||||
:param ~azure.storage.models.RetryContext context:
|
||||
The retry context. This contains the request, response, and other data
|
||||
which can be used to determine whether or not to retry.
|
||||
:return:
|
||||
An integer indicating how long to wait before retrying the request,
|
||||
or None to indicate no retry should be performed.
|
||||
:rtype: int or None
|
||||
'''
|
||||
|
||||
def retry(self, context):
|
||||
return self._retry(context, self._backoff)
|
||||
|
||||
'''
|
||||
Calculates how long to sleep before retrying.
|
||||
|
||||
:return:
|
||||
An integer indicating how long to wait before retrying the request,
|
||||
or None to indicate no retry should be performed.
|
||||
:rtype: int or None
|
||||
'''
|
||||
|
||||
def _backoff(self, context):
|
||||
random_generator = random.Random()
|
||||
backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count))
|
||||
random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
|
||||
random_range_end = backoff + self.random_jitter_range
|
||||
return random_generator.uniform(random_range_start, random_range_end)
|
||||
|
||||
|
||||
class LinearRetry(_Retry):
|
||||
'''
|
||||
Linear retry.
|
||||
'''
|
||||
|
||||
def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3):
|
||||
'''
|
||||
Constructs a Linear retry object.
|
||||
|
||||
:param int backoff:
|
||||
The backoff interval, in seconds, between retries.
|
||||
:param int max_attempts:
|
||||
The maximum number of retry attempts.
|
||||
:param bool retry_to_secondary:
|
||||
Whether the request should be retried to secondary, if able. This should
|
||||
only be enabled of RA-GRS accounts are used and potentially stale data
|
||||
can be handled.
|
||||
:param int random_jitter_range:
|
||||
A number in seconds which indicates a range to jitter/randomize for the back-off interval.
|
||||
For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
|
||||
'''
|
||||
self.backoff = backoff
|
||||
self.max_attempts = max_attempts
|
||||
self.random_jitter_range = random_jitter_range
|
||||
super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
|
||||
|
||||
'''
|
||||
A function which determines whether and how to retry.
|
||||
|
||||
:param ~azure.storage.models.RetryContext context:
|
||||
The retry context. This contains the request, response, and other data
|
||||
which can be used to determine whether or not to retry.
|
||||
:return:
|
||||
An integer indicating how long to wait before retrying the request,
|
||||
or None to indicate no retry should be performed.
|
||||
:rtype: int or None
|
||||
'''
|
||||
|
||||
def retry(self, context):
|
||||
return self._retry(context, self._backoff)
|
||||
|
||||
'''
|
||||
Calculates how long to sleep before retrying.
|
||||
|
||||
:return:
|
||||
An integer indicating how long to wait before retrying the request,
|
||||
or None to indicate no retry should be performed.
|
||||
:rtype: int or None
|
||||
'''
|
||||
|
||||
def _backoff(self, context):
|
||||
random_generator = random.Random()
|
||||
# the backoff interval normally does not change, however there is the possibility
|
||||
# that it was modified by accessing the property directly after initializing the object
|
||||
self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
|
||||
self.random_range_end = self.backoff + self.random_jitter_range
|
||||
return random_generator.uniform(self.random_range_start, self.random_range_end)
|
||||
|
||||
|
||||
def no_retry(context):
|
||||
'''
|
||||
Specifies never to retry.
|
||||
|
||||
:param ~azure.storage.models.RetryContext context:
|
||||
The retry context.
|
||||
:return:
|
||||
Always returns None to indicate never to retry.
|
||||
:rtype: None
|
||||
'''
|
||||
return None
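For a sense of the schedule the ExponentialRetry above produces, here is a minimal sketch (not part of the commit; the SimpleContext stand-in is hypothetical and only mimics the count attribute of RetryContext):

from azure.cosmosdb.common.retry import ExponentialRetry

class SimpleContext(object):
    # Hypothetical stand-in carrying only the retry count.
    def __init__(self, count):
        self.count = count

# With jitter disabled, the schedule matches the docstring: 15.0, 18.0, 24.0 seconds.
policy = ExponentialRetry(initial_backoff=15, increment_base=3, random_jitter_range=0)
for attempt in range(3):
    print(policy._backoff(SimpleContext(attempt)))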
@@ -0,0 +1,217 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from datetime import date

from ._common_conversion import (
    _sign_string,
    _to_str,
)
from ._constants import DEFAULT_X_MS_VERSION
from ._serialization import (
    url_quote,
    _to_utc_datetime,
)


class SharedAccessSignature(object):
    '''
    Provides a factory for creating account access
    signature tokens with an account name and account key. Users can either
    use the factory or can construct the appropriate service and use the
    generate_*_shared_access_signature method directly.
    '''

    def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION):
        '''
        :param str account_name:
            The storage account name used to generate the shared access signatures.
        :param str account_key:
            The access key to generate the shared access signatures.
        :param str x_ms_version:
            The service version used to generate the shared access signatures.
        '''
        self.account_name = account_name
        self.account_key = account_key
        self.x_ms_version = x_ms_version

    def generate_account(self, services, resource_types, permission, expiry, start=None,
                         ip=None, protocol=None):
        '''
        Generates a shared access signature for the account.
        Use the returned signature with the sas_token parameter of the service
        or to create a new account object.

        :param Services services:
            Specifies the services accessible with the account SAS. You can
            combine values to provide access to more than one service.
        :param ResourceTypes resource_types:
            Specifies the resource types that are accessible with the account
            SAS. You can combine values to provide access to more than one
            resource type.
        :param AccountPermissions permission:
            The permissions associated with the shared access signature. The
            user is restricted to operations allowed by the permissions.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has been
            specified in an associated stored access policy. You can combine
            values to provide more than one permission.
        :param expiry:
            The time at which the shared access signature becomes invalid.
            Required unless an id is given referencing a stored access policy
            which contains this field. This field must be omitted if it has
            been specified in an associated stored access policy. Azure will always
            convert values to UTC. If a date is passed in without timezone info, it
            is assumed to be UTC.
        :type expiry: datetime or str
        :param start:
            The time at which the shared access signature becomes valid. If
            omitted, start time for this call is assumed to be the time when the
            storage service receives the request. Azure will always convert values
            to UTC. If a date is passed in without timezone info, it is assumed to
            be UTC.
        :type start: datetime or str
        :param str ip:
            Specifies an IP address or a range of IP addresses from which to accept requests.
            If the IP address from which the request originates does not match the IP address
            or address range specified on the SAS token, the request is not authenticated.
            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
            restricts the request to those IP addresses.
        :param str protocol:
            Specifies the protocol permitted for a request made. The default value
            is https,http. See :class:`~azure.cosmosdb.common.models.Protocol` for possible values.
        '''
        sas = _SharedAccessHelper()
        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
        sas.add_account(services, resource_types)
        sas.add_account_signature(self.account_name, self.account_key)

        return sas.get_token()


class _QueryStringConstants(object):
    SIGNED_SIGNATURE = 'sig'
    SIGNED_PERMISSION = 'sp'
    SIGNED_START = 'st'
    SIGNED_EXPIRY = 'se'
    SIGNED_RESOURCE = 'sr'
    SIGNED_IDENTIFIER = 'si'
    SIGNED_IP = 'sip'
    SIGNED_PROTOCOL = 'spr'
    SIGNED_VERSION = 'sv'
    SIGNED_CACHE_CONTROL = 'rscc'
    SIGNED_CONTENT_DISPOSITION = 'rscd'
    SIGNED_CONTENT_ENCODING = 'rsce'
    SIGNED_CONTENT_LANGUAGE = 'rscl'
    SIGNED_CONTENT_TYPE = 'rsct'
    START_PK = 'spk'
    START_RK = 'srk'
    END_PK = 'epk'
    END_RK = 'erk'
    SIGNED_RESOURCE_TYPES = 'srt'
    SIGNED_SERVICES = 'ss'


class _SharedAccessHelper(object):
    def __init__(self):
        self.query_dict = {}

    def _add_query(self, name, val):
        if val:
            self.query_dict[name] = _to_str(val)

    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
        if isinstance(start, date):
            start = _to_utc_datetime(start)

        if isinstance(expiry, date):
            expiry = _to_utc_datetime(expiry)

        self._add_query(_QueryStringConstants.SIGNED_START, start)
        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
        self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version)

    def add_resource(self, resource):
        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)

    def add_id(self, id):
        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)

    def add_account(self, services, resource_types):
        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)

    def add_override_response_headers(self, cache_control,
                                      content_disposition,
                                      content_encoding,
                                      content_language,
                                      content_type):
        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)

    def add_resource_signature(self, account_name, account_key, service, path):
        def get_value_to_append(query):
            return_value = self.query_dict.get(query) or ''
            return return_value + '\n'

        if path[0] != '/':
            path = '/' + path

        canonicalized_resource = '/' + service + '/' + account_name + path + '\n'

        # Form the string to sign from shared_access_policy and canonicalized
        # resource. The order of values is important.
        string_to_sign = \
            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
             get_value_to_append(_QueryStringConstants.SIGNED_START) +
             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
             canonicalized_resource +
             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))

        if service == 'blob' or service == 'file':
            string_to_sign += \
                (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))

        # remove the trailing newline
        if string_to_sign[-1] == '\n':
            string_to_sign = string_to_sign[:-1]

        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
                        _sign_string(account_key, string_to_sign))

    def add_account_signature(self, account_name, account_key):
        def get_value_to_append(query):
            return_value = self.query_dict.get(query) or ''
            return return_value + '\n'

        string_to_sign = \
            (account_name + '\n' +
             get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
             get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
             get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
             get_value_to_append(_QueryStringConstants.SIGNED_START) +
             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))

        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
                        _sign_string(account_key, string_to_sign))

    def get_token(self):
        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
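As a usage sketch (not part of the commit; the account name, base64 key, and single-letter permission values below are placeholders), generating an account-level SAS token with the factory above:

from azure.cosmosdb.common.sharedaccesssignature import SharedAccessSignature

sas = SharedAccessSignature(account_name='myaccount', account_key='bXlrZXk=')
token = sas.generate_account(
    services='t',        # table service
    resource_types='o',  # object-level resources
    permission='r',      # read-only
    expiry='2020-01-01T00:00:00Z',
)
# token is the signed query string, such as 'se=...&sp=r&sv=...&ss=t&srt=o&sig=...'
print(token)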
@@ -0,0 +1,440 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import requests
from abc import ABCMeta
import logging
from time import sleep
import sys

from azure.common import (
    AzureException,
    AzureHttpError,
)

from ._constants import (
    DEFAULT_SOCKET_TIMEOUT,
    DEFAULT_X_MS_VERSION,
    DEFAULT_USER_AGENT_STRING,
    USER_AGENT_STRING_PREFIX,
    USER_AGENT_STRING_SUFFIX,
    _AUTHORIZATION_HEADER_NAME,
    _REDACTED_VALUE,
    _COPY_SOURCE_HEADER_NAME,
)
from ._error import (
    _ERROR_DECRYPTION_FAILURE,
    _http_error_handler,
    _wrap_exception,
    AzureSigningError,
)
from ._http import HTTPError
from ._http.httpclient import _HTTPClient
from ._serialization import (
    _update_request,
    _add_date_header,
)
from .models import (
    RetryContext,
    LocationMode,
    _OperationContext,
)
from .retry import ExponentialRetry
from io import UnsupportedOperation
from .sharedaccesssignature import _QueryStringConstants

if sys.version_info >= (3,):
    from urllib.parse import (
        urlparse,
        parse_qsl,
        urlunparse,
        urlencode,
    )
else:
    from urlparse import (
        urlparse,
        parse_qsl,
        urlunparse,
    )
    from urllib import urlencode

logger = logging.getLogger(__name__)


class StorageClient(object):
    '''
    This is the base class for service objects. Service objects are used to do
    all requests to Storage. This class cannot be instantiated directly.

    :ivar str account_name:
        The storage account name. This is used to authenticate requests
        signed with an account key and to construct the storage endpoint. It
        is required unless a connection string is given, or if a custom
        domain is used with anonymous authentication.
    :ivar str account_key:
        The storage account key. This is used for shared key authentication.
        If neither account key nor sas token is specified, anonymous access
        will be used.
    :ivar str sas_token:
        A shared access signature token to use to authenticate requests
        instead of the account key. If account key and sas token are both
        specified, account key will be used to sign. If neither are
        specified, anonymous access will be used.
    :ivar str primary_endpoint:
        The endpoint to send storage requests to.
    :ivar str secondary_endpoint:
        The secondary endpoint to read storage data from. This will only be a
        valid endpoint if the storage account used is RA-GRS and thus allows
        reading from secondary.
    :ivar function(context) retry:
        A function which determines whether to retry. Takes as a parameter a
        :class:`~azure.cosmosdb.common.models.RetryContext` object. Returns the number
        of seconds to wait before retrying the request, or None to indicate not
        to retry.
    :ivar ~azure.cosmosdb.common.models.LocationMode location_mode:
        The host location to use to make requests. Defaults to LocationMode.PRIMARY.
        Note that this setting only applies to RA-GRS accounts as other account
        types do not allow reading from secondary. If the location_mode is set to
        LocationMode.SECONDARY, read requests will be sent to the secondary endpoint.
        Write requests will continue to be sent to primary.
    :ivar str protocol:
        The protocol to use for requests. Defaults to https.
    :ivar requests.Session request_session:
        The session object to use for http requests.
    :ivar function(request) request_callback:
        A function called immediately before each request is sent. This function
        takes as a parameter the request object and returns nothing. It may be
        used to add custom headers or log request data.
    :ivar function() response_callback:
        A function called immediately after each response is received. This
        function takes as a parameter the response object and returns nothing.
        It may be used to log response data.
    :ivar function() retry_callback:
        A function called immediately after retry evaluation is performed. This
        function takes as a parameter the retry context object and returns nothing.
        It may be used to detect retries and log context information.
    '''

    __metaclass__ = ABCMeta

    def __init__(self, connection_params):
        '''
        :param obj connection_params: The parameters to use to construct the client.
        '''
        self.account_name = connection_params.account_name
        self.account_key = connection_params.account_key
        self.sas_token = connection_params.sas_token
        self.token_credential = connection_params.token_credential
        self.is_emulated = connection_params.is_emulated

        self.primary_endpoint = connection_params.primary_endpoint
        self.secondary_endpoint = connection_params.secondary_endpoint

        protocol = connection_params.protocol
        request_session = connection_params.request_session or requests.Session()
        socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
        self._httpclient = _HTTPClient(
            protocol=protocol,
            session=request_session,
            timeout=socket_timeout,
        )

        self.retry = ExponentialRetry().retry
        self.location_mode = LocationMode.PRIMARY

        self.request_callback = None
        self.response_callback = None
        self.retry_callback = None
        self._X_MS_VERSION = DEFAULT_X_MS_VERSION
        self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING

    def _update_user_agent_string(self, service_package_version):
        self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX,
                                                   service_package_version,
                                                   USER_AGENT_STRING_SUFFIX)

    @property
    def socket_timeout(self):
        return self._httpclient.timeout

    @socket_timeout.setter
    def socket_timeout(self, value):
        self._httpclient.timeout = value

    @property
    def protocol(self):
        return self._httpclient.protocol

    @protocol.setter
    def protocol(self, value):
        self._httpclient.protocol = value

    @property
    def request_session(self):
        return self._httpclient.session

    @request_session.setter
    def request_session(self, value):
        self._httpclient.session = value

    def set_proxy(self, host, port, user=None, password=None):
        '''
        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        :param str host: Address of the proxy. Ex: '192.168.0.100'
        :param int port: Port of the proxy. Ex: 6000
        :param str user: User for proxy authorization.
        :param str password: Password for proxy authorization.
        '''
        self._httpclient.set_proxy(host, port, user, password)

    def _get_host_locations(self, primary=True, secondary=False):
        locations = {}
        if primary:
            locations[LocationMode.PRIMARY] = self.primary_endpoint
        if secondary:
            locations[LocationMode.SECONDARY] = self.secondary_endpoint
        return locations

    def _apply_host(self, request, operation_context, retry_context):
        if operation_context.location_lock and operation_context.host_location:
            # If this is a location locked operation and the location is set,
            # override the request location and host_location.
            request.host_locations = operation_context.host_location
            request.host = list(operation_context.host_location.values())[0]
            retry_context.location_mode = list(operation_context.host_location.keys())[0]
        elif len(request.host_locations) == 1:
            # If only one location is allowed, use that location.
            request.host = list(request.host_locations.values())[0]
            retry_context.location_mode = list(request.host_locations.keys())[0]
        else:
            # If multiple locations are possible, choose based on the location mode.
            request.host = request.host_locations.get(self.location_mode)
            retry_context.location_mode = self.location_mode

    @staticmethod
    def extract_date_and_request_id(retry_context):
        if getattr(retry_context, 'response', None) is None:
            return ""
        resp = retry_context.response

        if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
            return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
                              resp.headers['date'], resp.headers['x-ms-request-id'])
        elif 'date' in resp.headers:
            return str.format("Server-Timestamp={0}", resp.headers['date'])
        elif 'x-ms-request-id' in resp.headers:
            return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
        else:
            return ""

    @staticmethod
    def _scrub_headers(headers):
        # make a copy to avoid contaminating the request
        clean_headers = headers.copy()

        if _AUTHORIZATION_HEADER_NAME in clean_headers:
            clean_headers[_AUTHORIZATION_HEADER_NAME] = _REDACTED_VALUE

        # in case of copy operations, there could be a SAS signature present in the header value
        if _COPY_SOURCE_HEADER_NAME in clean_headers \
                and _QueryStringConstants.SIGNED_SIGNATURE + "=" in clean_headers[_COPY_SOURCE_HEADER_NAME]:
            # take the url apart and scrub away the signed signature
            scheme, netloc, path, params, query, fragment = urlparse(clean_headers[_COPY_SOURCE_HEADER_NAME])
            parsed_qs = dict(parse_qsl(query))
            parsed_qs[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE

            # the SAS needs to be put back together
            clean_headers[_COPY_SOURCE_HEADER_NAME] = urlunparse(
                (scheme, netloc, path, params, urlencode(parsed_qs), fragment))
        return clean_headers

    @staticmethod
    def _scrub_query_parameters(query):
        # make a copy to avoid contaminating the request
        clean_queries = query.copy()

        if _QueryStringConstants.SIGNED_SIGNATURE in clean_queries:
            clean_queries[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE
        return clean_queries

    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None):
        '''
        Sends the request and returns the response. Catches HTTPError and hands
        it to the error handler.
        '''
        operation_context = operation_context or _OperationContext()
        retry_context = RetryContext()
        retry_context.is_emulated = self.is_emulated

        # if request body is a stream, we need to remember its current position in case retries happen
        if hasattr(request.body, 'read'):
            try:
                retry_context.body_position = request.body.tell()
            except (AttributeError, UnsupportedOperation):
                # if body position cannot be obtained, then retries will not work
                pass

        # Apply the appropriate host based on the location mode
        self._apply_host(request, operation_context, retry_context)

        # Apply common settings to the request
        _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
        client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])

        while True:
            try:
                try:
                    # Execute the request callback
                    if self.request_callback:
                        self.request_callback(request)

                    # Add date and auth after the callback so date doesn't get too old and
                    # authentication is still correct if signed headers are added in the request
                    # callback. This also ensures retry policies with long back offs
                    # will work as it resets the time sensitive headers.
                    _add_date_header(request)

                    try:
                        # request can be signed individually
                        self.authentication.sign_request(request)
                    except AttributeError:
                        # session can also be signed
                        self.request_session = self.authentication.signed_session(self.request_session)

                    # Set the request context
                    retry_context.request = request

                    # Log the request before it goes out
                    # Avoid unnecessary scrubbing if the logger is not on
                    if logger.isEnabledFor(logging.INFO):
                        logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
                                    client_request_id_prefix,
                                    request.method,
                                    request.path,
                                    self._scrub_query_parameters(request.query),
                                    str(self._scrub_headers(request.headers)).replace('\n', ''))

                    # Perform the request
                    response = self._httpclient.perform_request(request)

                    # Execute the response callback
                    if self.response_callback:
                        self.response_callback(response)

                    # Set the response context
                    retry_context.response = response

                    # Log the response when it comes back
                    logger.info("%s Receiving Response: "
                                "%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
                                client_request_id_prefix,
                                self.extract_date_and_request_id(retry_context),
                                response.status,
                                response.message,
                                str(response.headers).replace('\n', ''))

                    # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
                    if response.status >= 300:
                        # This exception will be caught by the general error handler
                        # and raised as an azure http exception
                        _http_error_handler(
                            HTTPError(response.status, response.message, response.headers, response.body))

                    # Parse the response
                    if parser:
                        if parser_args:
                            args = [response]
                            args.extend(parser_args)
                            return parser(*args)
                        else:
                            return parser(response)
                    else:
                        return
                except AzureException as ex:
                    retry_context.exception = ex
                    raise ex
                except Exception as ex:
                    retry_context.exception = ex
                    raise _wrap_exception(ex, AzureException)

            except AzureException as ex:
                # only parse the strings used for logging if logging is at least enabled for CRITICAL
                exception_str_in_one_line = ''
                status_code = ''
                timestamp_and_request_id = ''
                if logger.isEnabledFor(logging.CRITICAL):
                    exception_str_in_one_line = str(ex).replace('\n', '')
                    status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
                    timestamp_and_request_id = self.extract_date_and_request_id(retry_context)

                # if the http error was expected, we should short-circuit
                if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors:
                    logger.info("%s Received expected http error: "
                                "%s, HTTP status code=%s, Exception=%s.",
                                client_request_id_prefix,
                                timestamp_and_request_id,
                                status_code,
                                exception_str_in_one_line)
                    raise ex
                elif isinstance(ex, AzureSigningError):
                    logger.info("%s Unable to sign the request: Exception=%s.",
                                client_request_id_prefix,
                                exception_str_in_one_line)
                    raise ex

                logger.info("%s Operation failed: checking if the operation should be retried. "
                            "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
                            client_request_id_prefix,
                            retry_context.count if hasattr(retry_context, 'count') else 0,
                            timestamp_and_request_id,
                            status_code,
                            exception_str_in_one_line)

                # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
                # will not be resolved with retries.
                if str(ex) == _ERROR_DECRYPTION_FAILURE:
                    logger.error("%s Encountered decryption failure: this cannot be retried. "
                                 "%s, HTTP status code=%s, Exception=%s.",
                                 client_request_id_prefix,
                                 timestamp_and_request_id,
                                 status_code,
                                 exception_str_in_one_line)
                    raise ex

                # Determine whether a retry should be performed and if so, how
                # long to wait before performing retry.
                retry_interval = self.retry(retry_context)
                if retry_interval is not None:
                    # Execute the callback
                    if self.retry_callback:
                        self.retry_callback(retry_context)

                    logger.info(
                        "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
                        client_request_id_prefix,
                        retry_context.count,
                        retry_interval)

                    # Sleep for the desired retry interval
                    sleep(retry_interval)
                else:
                    logger.error("%s Retry policy did not allow for a retry: "
                                 "%s, HTTP status code=%s, Exception=%s.",
                                 client_request_id_prefix,
                                 timestamp_and_request_id,
                                 status_code,
                                 exception_str_in_one_line)
                    raise ex
            finally:
                # If this is a location locked operation and the location is not set,
                # this is the first request of that operation. Set the location to
                # be used for subsequent requests in the operation.
                if operation_context.location_lock and not operation_context.host_location:
                    # note: to cover the emulator scenario, the host_location is grabbed
                    # from request.host_locations (which includes the dev account name)
                    # instead of request.host (which at this point no longer includes the dev account name)
                    operation_context.host_location = {
                        retry_context.location_mode: request.host_locations[retry_context.location_mode]}
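The callback and retry hooks documented above are wired up from user code; a hedged sketch (not part of the commit; `table_service` stands in for any concrete StorageClient subclass, such as a TableService instance):

from azure.cosmosdb.common.retry import ExponentialRetry

def log_retry(retry_context):
    # retry_context is the RetryContext the retry policy just evaluated
    print('retry #{0} via {1} endpoint'.format(retry_context.count, retry_context.location_mode))

table_service.retry = ExponentialRetry(initial_backoff=2, max_attempts=5).retry
table_service.retry_callback = log_retry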
@@ -0,0 +1,48 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import requests


class TokenCredential(object):
    """
    Represents a token credential that is used to authorize HTTPS requests.
    The token can be updated by the user.

    :ivar str token:
        The authorization token. It can be set by the user at any point in a thread-safe way.
    """

    def __init__(self, initial_value=None):
        """
        :param initial_value: initial value for the token.
        """
        self.token = initial_value

    def signed_session(self, session=None):
        """
        Sign requests session with the token. This method is called every time a request is going on the wire.
        The user is responsible for updating the token with the preferred tool/SDK.
        In general there are two options:
            - override this method to update the token in a preferred way and set Authorization header on session
            - not override this method, and have a timer that triggers periodically to update the token on this class

        The second option is recommended as it tends to be more performance-friendly.

        :param session: The session to configure for authentication
        :type session: requests.Session
        :rtype: requests.Session
        """
        session = session or requests.Session()
        session.headers['Authorization'] = "Bearer {}".format(self.token)

        return session

    def update_token(self, new_value):
        """
        Sets a new value as the token; named distinctly from the token
        attribute, which would otherwise shadow the method.

        :param new_value: new value to be set as the token.
        """
        self.token = new_value
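A hedged sketch of the recommended timer-based refresh (not part of the commit; get_fresh_token is a hypothetical stand-in for a real token source such as an AAD client):

import threading

from azure.cosmosdb.common.tokencredential import TokenCredential

def get_fresh_token():
    # Hypothetical bearer-token fetch; replace with a real AAD acquisition.
    return 'eyJ0eXAi...'

credential = TokenCredential(get_fresh_token())

def refresh_periodically():
    credential.update_token(get_fresh_token())
    threading.Timer(300, refresh_periodically).start()  # refresh every 5 minutes

refresh_periodically()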
@@ -13,7 +13,7 @@
# limitations under the License.
# --------------------------------------------------------------------------

-from azure.storage.common._auth import (
+from azure.cosmosdb.common._auth import (
    _StorageSharedKeyAuthentication,
)


@@ -13,12 +13,12 @@
# limitations under the License.
# --------------------------------------------------------------------------

-from azure.storage.common._constants import (
+from azure.cosmosdb.common._constants import (
    SERVICE_HOST_BASE,
    DEFAULT_PROTOCOL,
)

-from azure.storage.common._connection import (
+from azure.cosmosdb.common._connection import (
    _ServiceParameters,
    _EMULATOR_ENDPOINTS,
    _CONNECTION_ENDPOINTS,

@@ -15,7 +15,7 @@
import platform

__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '1.0.5'
+__version__ = '1.0.6'

# x-ms-version for storage service.
X_MS_VERSION = '2017-04-17'

@@ -23,14 +23,14 @@ else:
    from json import (
        loads,
    )
-from azure.storage.common._http import HTTPResponse
+from azure.cosmosdb.common._http import HTTPResponse
from azure.common import (
    AzureException,
)
-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _decode_base64_to_bytes,
)
-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _ERROR_DECRYPTION_FAILURE,
    _validate_decryption_required,
)

@@ -45,7 +45,7 @@ from azure.cosmosdb.table.models import (
    EdmType,
    AzureBatchOperationError,
)
-from azure.storage.common.models import (
+from azure.cosmosdb.common.models import (
    _list,
)
from azure.cosmosdb.table._encryption import (

@@ -28,20 +28,20 @@ from cryptography.hazmat.primitives.hashes import (
)
from cryptography.hazmat.primitives.padding import PKCS7

-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _decode_base64_to_bytes,
)
-from azure.storage.common._constants import (
+from azure.cosmosdb.common._constants import (
    _ENCRYPTION_PROTOCOL_V1,
)
-from azure.storage.common._encryption import (
+from azure.cosmosdb.common._encryption import (
    _generate_encryption_data_dict,
    _dict_to_encryption_data,
    _generate_AES_CBC_cipher,
    _validate_and_unwrap_cek,
    _EncryptionAlgorithm
)
-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _ERROR_DECRYPTION_FAILURE,
    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
    _validate_not_none,

@@ -13,7 +13,7 @@
# limitations under the License.
# --------------------------------------------------------------------------

-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _validate_not_none,
    _ERROR_VALUE_NONE_OR_EMPTY,
)

@@ -12,16 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _to_str,
)
-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _validate_not_none,
    _validate_encryption_required,
    _validate_encryption_unsupported,
)
-from azure.storage.common._http import HTTPRequest
-from azure.storage.common._serialization import (
+from azure.cosmosdb.common._http import HTTPRequest
+from azure.cosmosdb.common._serialization import (
    _get_request_body,
)
from azure.cosmosdb.table._encryption import (

@@ -23,11 +23,11 @@ from math import (
    isnan,
)

-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _encode_base64,
    _to_str,
)
-from azure.storage.common._serialization import (
+from azure.cosmosdb.common._serialization import (
    _to_utc_datetime,
)
from azure.cosmosdb.table._error import (

@@ -21,7 +21,7 @@ from azure.cosmosdb.table._error import (
    _ERROR_ATTRIBUTE_MISSING,
)

-from azure.storage.common.models import (
+from azure.cosmosdb.common.models import (
    Services,
)


@@ -13,10 +13,10 @@
# limitations under the License.
# --------------------------------------------------------------------------

-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _sign_string,
)
-from azure.storage.common.sharedaccesssignature import (
+from azure.cosmosdb.common.sharedaccesssignature import (
    SharedAccessSignature,
    _SharedAccessHelper,
    _QueryStringConstants,

@@ -85,7 +85,7 @@ class TableSharedAccessSignature(SharedAccessSignature):
            restricts the request to those IP addresses.
        :param str protocol:
            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+            is https,http. See :class:`~azure.cosmosdb.common.models.Protocol` for possible values.
        :param str start_pk:
            The minimum partition key accessible with this shared access
            signature. startpk must accompany startrk. Key values are inclusive.
@@ -18,42 +18,42 @@ from azure.common import (
    AzureHttpError,
)

-from azure.storage.common._auth import (
+from azure.cosmosdb.common._auth import (
    _StorageSASAuthentication,
)
from azure.cosmosdb.table._auth import (
    _StorageTableSharedKeyAuthentication,
)
-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _int_to_str,
    _to_str,
)
from azure.cosmosdb.table._connection import _TableServiceParameters
-from azure.storage.common._constants import (
+from azure.cosmosdb.common._constants import (
    SERVICE_HOST_BASE,
    DEFAULT_PROTOCOL,
    DEV_ACCOUNT_NAME,
)
-from azure.storage.common._deserialization import (
+from azure.cosmosdb.common._deserialization import (
    _convert_xml_to_service_properties,
    _convert_xml_to_signed_identifiers,
    _convert_xml_to_service_stats,
)
-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _dont_fail_not_exist,
    _dont_fail_on_exist,
    _validate_not_none,
    _ERROR_STORAGE_MISSING_INFO,
    _validate_access_policies,
)
-from azure.storage.common._http import HTTPRequest
-from azure.storage.common._serialization import (
+from azure.cosmosdb.common._http import HTTPRequest
+from azure.cosmosdb.common._serialization import (
    _get_request_body,
    _update_request,
    _convert_signed_identifiers_to_xml,
    _convert_service_properties_to_xml,
)
-from azure.storage.common.models import (
+from azure.cosmosdb.common.models import (
    ListGenerator,
    _OperationContext,
)

@@ -63,7 +63,7 @@ from azure.cosmosdb.table.models import (
from azure.cosmosdb.table.sharedaccesssignature import (
    TableSharedAccessSignature,
)
-from azure.storage.common.storageclient import StorageClient
+from azure.cosmosdb.common.storageclient import StorageClient
from azure.cosmosdb.table._deserialization import (
    _convert_json_response_to_entity,
    _convert_json_response_to_tables,

@@ -236,7 +236,7 @@ class TableService(StorageClient):
            restricts the request to those IP addresses.
        :param str protocol:
            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+            is https,http. See :class:`~azure.cosmosdb.common.models.Protocol` for possible values.
        :return: A Shared Access Signature (sas) token.
        :rtype: str
        '''

@@ -290,7 +290,7 @@ class TableService(StorageClient):
            restricts the request to those IP addresses.
        :param str protocol:
            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+            is https,http. See :class:`~azure.cosmosdb.common.models.Protocol` for possible values.
        :param str start_pk:
            The minimum partition key accessible with this shared access
            signature. startpk must accompany startrk. Key values are inclusive.

@@ -354,7 +354,7 @@ class TableService(StorageClient):
        :param int timeout:
            The timeout parameter is expressed in seconds.
        :return: The table service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
+        :rtype: :class:`~azure.cosmosdb.common.models.ServiceStats`
        '''
        request = HTTPRequest()
        request.method = 'GET'

@@ -376,7 +376,7 @@ class TableService(StorageClient):
        :param int timeout:
            The server timeout, expressed in seconds.
        :return: The table service properties.
-        :rtype: :class:`~azure.storage.common.models.ServiceProperties`
+        :rtype: :class:`~azure.cosmosdb.common.models.ServiceProperties`
        '''
        request = HTTPRequest()
        request.method = 'GET'

@@ -413,7 +413,7 @@ class TableService(StorageClient):
            and CORS will be disabled for the service. For detailed information
            about CORS rules and evaluation logic, see
            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
+        :type cors: list(:class:`~azure.cosmosdb.common.models.CorsRule`)
        :param int timeout:
            The server timeout, expressed in seconds.
        '''

@@ -455,8 +455,8 @@ class TableService(StorageClient):
            The server timeout, expressed in seconds. This function may make multiple
            calls to the service in which case the timeout value specified will be
            applied to each individual call.
-        :return: A generator which produces :class:`~azure.storage.common.models.table.Table` objects.
-        :rtype: :class:`~azure.storage.common.models.ListGenerator`:
+        :return: A generator which produces :class:`~azure.cosmosdb.common.models.table.Table` objects.
+        :rtype: :class:`~azure.cosmosdb.common.models.ListGenerator`:
        '''
        operation_context = _OperationContext(location_lock=True)
        kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout,

@@ -485,7 +485,7 @@ class TableService(StorageClient):
        :param int timeout:
            The server timeout, expressed in seconds.
        :return: A list of tables, potentially with a next_marker property.
-        :rtype: list(:class:`~azure.storage.common.models.table.Table`)
+        :rtype: list(:class:`~azure.cosmosdb.common.models.table.Table`)
        '''
        request = HTTPRequest()
        request.method = 'GET'

@@ -620,7 +620,7 @@ class TableService(StorageClient):
        :param int timeout:
            The server timeout, expressed in seconds.
        :return: A dictionary of access policies associated with the table.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
+        :rtype: dict(str, :class:`~azure.cosmosdb.common.models.AccessPolicy`)
        '''
        _validate_not_none('table_name', table_name)
        request = HTTPRequest()

@@ -656,7 +656,7 @@ class TableService(StorageClient):
            A dictionary of access policies to associate with the table. The
            dictionary may contain up to 5 elements. An empty dictionary
            will clear the access policies set on the service.
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
+        :type signed_identifiers: dict(str, :class:`~azure.cosmosdb.common.models.AccessPolicy`)
        :param int timeout:
            The server timeout, expressed in seconds.
        '''

@@ -722,7 +722,7 @@ class TableService(StorageClient):
            calls to the service in which case the timeout value specified will be
            applied to each individual call.
        :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
-        :rtype: :class:`~azure.storage.common.models.ListGenerator`
+        :rtype: :class:`~azure.cosmosdb.common.models.ListGenerator`
        '''

        operation_context = _OperationContext(location_lock=True)

@@ -54,9 +54,9 @@ copyright = u'2015, Microsoft'
# built documents.
#
# The short X.Y version.
-version = '1.0.5'
+version = '1.0.6'
# The full version, including alpha/beta/rc tags.
-release = '1.0.5'
+release = '1.0.6'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

@@ -15,7 +15,7 @@ from azure.cosmosdb.table import TableService
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
-from azure.storage.common import CloudStorageAccount
+from azure.cosmosdb.common import CloudStorageAccount


class AuthenticationSamples():

@@ -17,8 +17,8 @@
import requests

from azure.cosmosdb.table import TableService
-from azure.storage.common.models import LocationMode
-from azure.storage.common.retry import (
+from azure.cosmosdb.common.models import LocationMode
+from azure.cosmosdb.common.retry import (
    ExponentialRetry,
    LinearRetry,
    no_retry,

@@ -16,7 +16,7 @@ import time
import uuid
from datetime import datetime, timedelta

-from azure.storage.common import (
+from azure.cosmosdb.common import (
    AccessPolicy,
    ResourceTypes,
    AccountPermissions,

@@ -22,7 +22,7 @@ from azure.common import (
    AzureMissingResourceHttpError,
)

-from azure.storage.common import (
+from azure.cosmosdb.common import (
    Logging,
    Metrics,
    CorsRule,

@@ -14,7 +14,7 @@
# --------------------------------------------------------------------------
import unittest

-from azure.storage.common import CloudStorageAccount
+from azure.cosmosdb.common import CloudStorageAccount
from samples.advanced import (
    AuthenticationSamples,
    ClientSamples,

@@ -31,7 +31,7 @@ class SampleTest(unittest.TestCase):
    def setUp(self):
        super(SampleTest, self).setUp()
        try:
-            from samples.config import config
+            import samples.config as config
        except:
            raise ValueError('Please specify configuration settings in config.py.')


@@ -45,7 +45,7 @@ except ImportError:

setup(
    name='azure-cosmosdb-table',
-    version='1.0.5',
+    version='1.0.6',
    description='Microsoft Azure CosmosDB Table Client Library for Python',
    long_description=open('README.rst', 'r').read(),
    license='Apache License 2.0',

@@ -68,7 +68,6 @@ setup(
    packages=find_packages(),
    install_requires=[
        'azure-common>=1.1.5',
-        'azure-storage-common~=1.1',
        'cryptography',
        'python-dateutil',
        'requests',

@@ -27,11 +27,12 @@ from azure.common import (
    AzureException,
)

-from azure.storage.common import (
+from azure.cosmosdb.common import (
    AccessPolicy,
    ResourceTypes,
    AccountPermissions,
)
+
from azure.cosmosdb.table import (
    TableService,
    TablePermissions,

@@ -17,7 +17,7 @@ from datetime import datetime

from dateutil.tz import tzutc

-from azure.storage.common.retry import (
+from azure.cosmosdb.common.retry import (
    LinearRetry,
)
from azure.cosmosdb.table import (

@@ -27,19 +27,19 @@ from cryptography.hazmat.primitives.hashes import (
from cryptography.hazmat.primitives.padding import PKCS7
from dateutil.tz import tzutc

-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _encode_base64,
)
-from azure.storage.common._encryption import (
+from azure.cosmosdb.common._encryption import (
    _dict_to_encryption_data,
    _generate_AES_CBC_cipher,
)
-from azure.storage.common._error import (
+from azure.cosmosdb.common._error import (
    _ERROR_OBJECT_INVALID,
    _ERROR_DECRYPTION_FAILURE,
    AzureException,
)
-from azure.storage.common.models import (
+from azure.cosmosdb.common.models import (
    AccessPolicy,
)
from azure.cosmosdb.table import (

@@ -26,10 +26,10 @@ from azure.common import (
)
from dateutil.tz import tzutc, tzoffset

-from azure.storage.common import (
+from azure.cosmosdb.common import (
    AccessPolicy,
)
-from azure.storage.common._common_conversion import (
+from azure.cosmosdb.common._common_conversion import (
    _encode_base64,
)
from azure.cosmosdb.table import (

@@ -1,5 +1,4 @@
futures;python_version<="2.7"
-azure-storage-common
python-dateutil
requests>=2.9.2
vcrpy
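With azure-storage-common dropped from the requirements, callers resolve everything through the vendored package; a hedged sketch (not part of the commit; the account name and base64 key are placeholders):

from azure.cosmosdb.table import TableService
from azure.cosmosdb.common.retry import LinearRetry

service = TableService(account_name='myaccount', account_key='bXlrZXk=')
service.retry = LinearRetry(backoff=5, max_attempts=4).retry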