Improved some details guided by the code inspection tool

Parent: 70277bca5f
Commit: c285393951
@@ -89,13 +89,14 @@ Download
 --------
 
 The Azure Storage SDK for Python is composed of 4 packages, each corresponding to a different service:
 
 - azure-storage-blob
 - azure-storage-file
 - azure-storage-queue
 - azure-storage-table
 
 Note: prior to and including version 0.36.0, there used to be a single package (azure-storage) containing all services.
-It is no longer supported, and users should install the 4 before mentioned packages individually, depending on the need.
+It is no longer supported, and users should install the 4 before-mentioned packages individually, depending on the need.
 
 Option 1: Via PyPi
 ~~~~~~~~~~~~~~~~~~
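Since the split described above, each service is imported from its own package. A minimal usage sketch of the post-0.36.0 packages (class names from the legacy SDK surface; account credentials are placeholders):

    # Each service now lives in its own pip-installable package,
    # e.g. azure-storage-blob and azure-storage-queue.
    # Account name and key below are placeholders.
    from azure.storage.blob import BlockBlobService
    from azure.storage.queue import QueueService

    blob_service = BlockBlobService(account_name='myaccount', account_key='<key>')
    queue_service = QueueService(account_name='myaccount', account_key='<key>')

    blob_service.create_container('mycontainer')
    queue_service.create_queue('myqueue')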
@@ -102,8 +102,7 @@ def _parse_lease(response):
     '''
     Extracts lease time and ID return headers.
     '''
-    lease = {}
-    lease['time'] = response.headers.get('x-ms-lease-time')
+    lease = {'time': response.headers.get('x-ms-lease-time')}
     if lease['time']:
         lease['time'] = _int_to_str(lease['time'])
 
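The change above is an instance of the dict-literal pattern the inspection tool favors; sketched in isolation (names are illustrative, not SDK code):

    # Successive item assignment replaced by a single dict literal.
    headers = {'x-ms-lease-time': '15', 'x-ms-lease-id': 'abc-123'}

    # Before: create an empty dict, then assign keys one by one.
    lease_before = {}
    lease_before['time'] = headers.get('x-ms-lease-time')
    lease_before['id'] = headers.get('x-ms-lease-id')

    # After: one literal, same result, one fewer statement per key.
    lease_after = {'time': headers.get('x-ms-lease-time'),
                   'id': headers.get('x-ms-lease-id')}

    assert lease_before == lease_after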
@@ -32,6 +32,7 @@ from azure.storage.common._error import (
     _validate_not_none,
     _validate_key_encryption_key_wrap,
     _ERROR_DATA_NOT_ENCRYPTED,
+    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
 )
 
 
@@ -101,7 +102,7 @@ def _generate_blob_encryption_data(key_encryption_key):
     encryption_data['EncryptionMode'] = 'FullBlob'
     encryption_data = dumps(encryption_data)
 
-    return (content_encryption_key, initialization_vector, encryption_data)
+    return content_encryption_key, initialization_vector, encryption_data
 
 
 def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
@@ -143,17 +144,17 @@ def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
     unpad = False
     start_range, end_range = 0, len(content)
     if 'content-range' in response.headers:
-        range = response.headers['content-range']
+        content_range = response.headers['content-range']
         # Format: 'bytes x-y/size'
 
         # Ignore the word 'bytes'
-        range = range.split(' ')
+        content_range = content_range.split(' ')
 
-        range = range[1].split('-')
-        start_range = int(range[0])
-        range = range[1].split('/')
-        end_range = int(range[0])
-        blob_size = int(range[1])
+        content_range = content_range[1].split('-')
+        start_range = int(content_range[0])
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
 
     if start_offset >= 16:
         iv = content[:16]
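Beyond fixing the shadowing of the built-in range, the renamed variable walks a 'bytes x-y/size' header by repeated splitting; the same parse stands alone like this (input value is illustrative):

    # Parse a Content-Range header of the form 'bytes x-y/size' by the
    # same repeated-split walk used above.
    content_range = 'bytes 0-1023/4096'

    parts = content_range.split(' ')       # ['bytes', '0-1023/4096']
    span = parts[1].split('-')             # ['0', '1023/4096']
    start_range = int(span[0])             # 0
    tail = span[1].split('/')              # ['1023', '4096']
    end_range = int(tail[0])               # 1023
    blob_size = int(tail[1])               # 4096

    assert (start_range, end_range, blob_size) == (0, 1023, 4096)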
@@ -57,13 +57,13 @@ def _get_path(container_name=None, blob_name=None):
 def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
                                        end_range_required=True, check_content_md5=False, align_to_page=False):
     # If end range is provided, start range must be provided
-    if start_range_required == True or end_range is not None:
+    if start_range_required or end_range is not None:
         _validate_not_none('start_range', start_range)
-    if end_range_required == True:
+    if end_range_required:
         _validate_not_none('end_range', end_range)
 
     # Page ranges must be 512 aligned
-    if align_to_page == True:
+    if align_to_page:
         if start_range is not None and start_range % 512 != 0:
             raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
         if end_range is not None and end_range % 512 != 511:
@@ -77,7 +77,7 @@ def _validate_and_format_range_headers(request, start_range, end_range, start_ra
         request.headers['x-ms-range'] = "bytes={0}-".format(start_range)
 
     # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5 == True:
+    if check_content_md5:
         if start_range is None or end_range is None:
             raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
         if end_range - start_range > 4 * 1024 * 1024:
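A self-contained sketch of the header formatting these checks guard, with a plain dict standing in for the SDK request object (the helper name is hypothetical):

    # Build an x-ms-range header the way the helper above does; a closed
    # range gets 'bytes=start-end', an open range gets 'bytes=start-'.
    def format_range_header(headers, start_range, end_range=None):
        if end_range is not None:
            headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
        else:
            headers['x-ms-range'] = 'bytes={0}-'.format(start_range)

    headers = {}
    format_range_header(headers, 0, 511)
    assert headers['x-ms-range'] == 'bytes=0-511'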
@@ -104,7 +104,7 @@ def _convert_block_list_to_xml(block_id_list):
     if block_id_list is None:
         return ''
 
-    block_list_element = ETree.Element('BlockList');
+    block_list_element = ETree.Element('BlockList')
 
     # Enabled
     for block in block_id_list:
@@ -421,16 +421,16 @@ class _SubStream(IOBase):
 
     def seek(self, offset, whence=0):
         if whence is SEEK_SET:
-            startIndex = 0
+            start_index = 0
         elif whence is SEEK_CUR:
-            startIndex = self._position
+            start_index = self._position
         elif whence is SEEK_END:
-            startIndex = self._length
+            start_index = self._length
             offset = - offset
         else:
             raise ValueError("Invalid argument for the 'whence' parameter.")
 
-        pos = startIndex + offset
+        pos = start_index + offset
 
         if pos > self._length:
            pos = self._length
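The snake_case rename leaves the whence handling untouched; it mirrors the standard io anchors, which a short standalone example makes concrete:

    import io

    # The three whence anchors mirrored by _SubStream.seek: absolute,
    # relative to the current position, and relative to the end.
    buf = io.BytesIO(b'0123456789')
    assert buf.seek(2, io.SEEK_SET) == 2   # start_index = 0, plus offset
    assert buf.seek(3, io.SEEK_CUR) == 5   # start_index = current position
    assert buf.seek(-4, io.SEEK_END) == 6  # start_index = length, offset negated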
@@ -757,11 +757,17 @@ class BaseBlobService(StorageClient):
             If specified, set_container_acl only succeeds if the
             container's lease is active and matches this ID.
         :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
             Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
+            if the resource has been modified since the specified date/time.
         :param datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
             Specify this header to perform the operation only if
             the resource has not been modified since the specified date/time.
         :param int timeout:
             The timeout parameter is expressed in seconds.
         :return: ETag and last modified properties for the updated Container
@@ -1186,7 +1186,7 @@ class PageBlobService(BaseBlobService):
         The destination blob cannot be modified while a copy operation is in progress.
 
         When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob’s length, initially containing all zeroes. Then
+        blob of the source blob's length, initially containing all zeroes. Then
         the source page ranges are enumerated, and non-empty ranges are copied.
 
         If the tier on the source blob is larger than the tier being passed to this
@@ -1315,7 +1315,7 @@ class PageBlobService(BaseBlobService):
            allows for encryption or other such special behavior because
            it is safely handled by the library. These behaviors are
            prohibited in the public version of this function.
-        :param str _encryption_data:
+        :param str encryption_data:
            The JSON formatted encryption metadata to upload as a part of the blob.
            This should only be passed internally from other methods and only applied
            when uploading entire blob contents immediately follows creation of the blob.
@@ -122,7 +122,7 @@ def _generate_encryption_data_dict(kek, cek, iv):
     Generates and returns the encryption metadata as a dict.
 
     :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The conetent encryption key.
+    :param bytes cek: The content encryption key.
     :param bytes iv: The initialization vector.
     :return: A dict containing all the encryption metadata.
     :rtype: dict
@@ -13,6 +13,7 @@
 # limitations under the License.
 # --------------------------------------------------------------------------
 
+
 class HTTPError(Exception):
     '''
     Represents an HTTP Exception when response status code >= 300.
@@ -1,348 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-import uuid
-
-from . import HTTPError, HTTPRequest, HTTPResponse
-from .httpclient import _HTTPClient
-from .._common_error import (
-    _ERROR_CANNOT_FIND_PARTITION_KEY,
-    _ERROR_CANNOT_FIND_ROW_KEY,
-    _ERROR_INCORRECT_TABLE_IN_BATCH,
-    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
-    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
-    _ERROR_BATCH_COMMIT_FAIL,
-)
-from .._common_serialization import (
-    ETree,
-    url_unquote,
-    _get_etree_text,
-    _etree_entity_feed_namespaces,
-    _update_request_uri_query,
-)
-from ..models import (
-    AzureBatchOperationError,
-    AzureBatchValidationError,
-)
-from ..table._serialization import (
-    _update_storage_table_header,
-)
-
-_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
-
-if sys.version_info < (3,):
-    def _new_boundary():
-        return str(uuid.uuid1())
-else:
-    def _new_boundary():
-        return str(uuid.uuid1()).encode('utf-8')
-
-
-class _BatchClient(_HTTPClient):
-    '''
-    This is the class that is used for batch operation for storage table
-    service. It only supports one changeset.
-    '''
-
-    def __init__(self, service_instance, authentication,
-                 protocol='http', request_session=None, timeout=65, user_agent=''):
-        _HTTPClient.__init__(self, service_instance, protocol=protocol, request_session=request_session,
-                             timeout=timeout, user_agent=user_agent)
-        self.authentication = authentication
-        self.is_batch = False
-        self.batch_requests = []
-        self.batch_table = ''
-        self.batch_partition_key = ''
-        self.batch_row_keys = []
-
-    def get_request_table(self, request):
-        '''
-        Extracts table name from request.uri. The request.uri has either
-        "/mytable(...)" or "/mytable" format.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if '(' in request.path:
-            pos = request.path.find('(')
-            return request.path[1:pos]
-        else:
-            return request.path[1:]
-
-    def get_request_partition_key(self, request):
-        '''
-        Extracts PartitionKey from request.body if it is a POST request or from
-        request.path if it is not a POST request. Only insert operation request
-        is a POST request and the PartitionKey is in the request body.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if request.method == 'POST':
-            doc = ETree.fromstring(request.body)
-            part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
-            if part_key is None:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
-            return _get_etree_text(part_key)
-        else:
-            uri = url_unquote(request.path)
-            pos1 = uri.find('PartitionKey=\'')
-            pos2 = uri.find('\',', pos1)
-            if pos1 == -1 or pos2 == -1:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
-            return uri[pos1 + len('PartitionKey=\''):pos2]
-
-    def get_request_row_key(self, request):
-        '''
-        Extracts RowKey from request.body if it is a POST request or from
-        request.path if it is not a POST request. Only insert operation request
-        is a POST request and the Rowkey is in the request body.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if request.method == 'POST':
-            doc = ETree.fromstring(request.body)
-            row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
-            if row_key is None:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
-            return _get_etree_text(row_key)
-        else:
-            uri = url_unquote(request.path)
-            pos1 = uri.find('RowKey=\'')
-            pos2 = uri.find('\')', pos1)
-            if pos1 == -1 or pos2 == -1:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
-            row_key = uri[pos1 + len('RowKey=\''):pos2]
-            return row_key
-
-    def validate_request_table(self, request):
-        '''
-        Validates that all requests have the same table name. Set the table
-        name if it is the first request for the batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if self.batch_table:
-            if self.get_request_table(request) != self.batch_table:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
-        else:
-            self.batch_table = self.get_request_table(request)
-
-    def validate_request_partition_key(self, request):
-        '''
-        Validates that all requests have the same PartitiionKey. Set the
-        PartitionKey if it is the first request for the batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if self.batch_partition_key:
-            if self.get_request_partition_key(request) != \
-                    self.batch_partition_key:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
-        else:
-            self.batch_partition_key = self.get_request_partition_key(request)
-
-    def validate_request_row_key(self, request):
-        '''
-        Validates that all requests have the different RowKey and adds RowKey
-        to existing RowKey list.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if self.batch_row_keys:
-            if self.get_request_row_key(request) in self.batch_row_keys:
-                raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
-        else:
-            self.batch_row_keys.append(self.get_request_row_key(request))
-
-    def begin_batch(self):
-        '''
-        Starts the batch operation. Intializes the batch variables
-
-        is_batch:
-            batch operation flag.
-        batch_table:
-            the table name of the batch operation
-        batch_partition_key:
-            the PartitionKey of the batch requests.
-        batch_row_keys:
-            the RowKey list of adding requests.
-        batch_requests:
-            the list of the requests.
-        '''
-        self.is_batch = True
-        self.batch_table = ''
-        self.batch_partition_key = ''
-        self.batch_row_keys = []
-        self.batch_requests = []
-
-    def insert_request_to_batch(self, request):
-        '''
-        Adds request to batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        self.validate_request_table(request)
-        self.validate_request_partition_key(request)
-        self.validate_request_row_key(request)
-        self.batch_requests.append(request)
-
-    def commit_batch(self):
-        ''' Resets batch flag and commits the batch requests. '''
-        if self.is_batch:
-            self.is_batch = False
-            self.commit_batch_requests()
-
-    def commit_batch_requests(self):
-        ''' Commits the batch requests. '''
-
-        batch_boundary = b'batch_' + _new_boundary()
-        changeset_boundary = b'changeset_' + _new_boundary()
-
-        # Commits batch only the requests list is not empty.
-        if self.batch_requests:
-            request = HTTPRequest()
-            request.method = 'POST'
-            request.host = self.batch_requests[0].host
-            request.path = '/$batch'
-            request.headers = [
-                ('Content-Type', 'multipart/mixed; boundary=' + \
-                 batch_boundary.decode('utf-8')),
-                ('Accept', 'application/atom+xml,application/xml'),
-                ('Accept-Charset', 'UTF-8')]
-
-            request.body = b'--' + batch_boundary + b'\n'
-            request.body += b'Content-Type: multipart/mixed; boundary='
-            request.body += changeset_boundary + b'\n\n'
-
-            content_id = 1
-
-            # Adds each request body to the POST data.
-            for batch_request in self.batch_requests:
-                request.body += b'--' + changeset_boundary + b'\n'
-                request.body += b'Content-Type: application/http\n'
-                request.body += b'Content-Transfer-Encoding: binary\n\n'
-                request.body += batch_request.method.encode('utf-8')
-                request.body += b' http://'
-                request.body += batch_request.host.encode('utf-8')
-                request.body += batch_request.path.encode('utf-8')
-                request.body += b' HTTP/1.1\n'
-                request.body += b'Content-ID: '
-                request.body += str(content_id).encode('utf-8') + b'\n'
-                content_id += 1
-
-                # Add different headers for different type requests.
-                if not batch_request.method == 'DELETE':
-                    request.body += \
-                        b'Content-Type: application/atom+xml;type=entry\n'
-                    for name, value in batch_request.headers:
-                        if name == 'If-Match':
-                            request.body += name.encode('utf-8') + b': '
-                            request.body += value.encode('utf-8') + b'\n'
-                            break
-                    request.body += b'Content-Length: '
-                    request.body += str(len(batch_request.body)).encode('utf-8')
-                    request.body += b'\n\n'
-                    request.body += batch_request.body + b'\n'
-                else:
-                    for name, value in batch_request.headers:
-                        # If-Match should be already included in
-                        # batch_request.headers, but in case it is missing,
-                        # just add it.
-                        if name == 'If-Match':
-                            request.body += name.encode('utf-8') + b': '
-                            request.body += value.encode('utf-8') + b'\n\n'
-                            break
-                    else:
-                        request.body += b'If-Match: *\n\n'
-
-            request.body += b'--' + changeset_boundary + b'--' + b'\n'
-            request.body += b'--' + batch_boundary + b'--'
-
-            request.path, request.query = _update_request_uri_query(request)
-            request.headers = _update_storage_table_header(request)
-            self.authentication.sign_request(request)
-
-            # Submit the whole request as batch request.
-            response = self.perform_request(request)
-            if response.status >= 300:
-                # This exception will be caught by the general error handler
-                # and raised as an azure http exception
-                raise HTTPError(response.status,
-                                _ERROR_BATCH_COMMIT_FAIL,
-                                self.respheader,
-                                response.body)
-
-            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/
-            # The body of a ChangeSet response is either a response for all the
-            # successfully processed change request within the ChangeSet,
-            # formatted exactly as it would have appeared outside of a batch,
-            # or a single response indicating a failure of the entire ChangeSet.
-            responses = self._parse_batch_response(response.body)
-            if responses and responses[0].status >= 300:
-                self._report_batch_error(responses[0])
-
-    def cancel_batch(self):
-        ''' Resets the batch flag. '''
-        self.is_batch = False
-
-    def _parse_batch_response(self, body):
-        parts = body.split(b'--changesetresponse_')
-
-        responses = []
-        for part in parts:
-            httpLocation = part.find(b'HTTP/')
-            if httpLocation > 0:
-                response = self._parse_batch_response_part(part[httpLocation:])
-                responses.append(response)
-
-        return responses
-
-    def _parse_batch_response_part(self, part):
-        lines = part.splitlines();
-
-        # First line is the HTTP status/reason
-        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
-
-        # Followed by headers and body
-        headers = []
-        body = b''
-        isBody = False
-        for line in lines[1:]:
-            if line == b'' and not isBody:
-                isBody = True
-            elif isBody:
-                body += line
-            else:
-                headerName, _, headerVal = line.partition(b':')
-                headers.append((headerName.lower(), headerVal))
-
-        return HTTPResponse(int(status), reason.strip(), headers, body)
-
-    def _report_batch_error(self, response):
-        doc = ETree.fromstring(response.body)
-
-        code_element = doc.find('./m:code', _etree_entity_feed_namespaces)
-        code = _get_etree_text(code_element) if code_element is not None else ''
-
-        message_element = doc.find('./m:message', _etree_entity_feed_namespaces)
-        message = _get_etree_text(message_element) if message_element is not None else ''
-
-        raise AzureBatchOperationError(message, response.status, code)
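The deleted module predates the current batching surface; under the split packages, equivalent behavior is reachable through TableService batching. A hedged sketch of the legacy 0.37-era API (treat the method names as assumptions to verify; credentials are placeholders):

    from azure.storage.table import Entity, TableService

    # Batch several operations against one partition; on exiting the
    # context manager the requests are committed as a single changeset,
    # much like the removed _BatchClient did by hand.
    table_service = TableService(account_name='myaccount', account_key='<key>')

    with table_service.batch('tasktable') as batch:
        for i in range(3):
            entity = Entity()
            entity.PartitionKey = 'tasks'          # one partition per batch
            entity.RowKey = 'task-{0}'.format(i)   # row keys must be unique
            batch.insert_entity(entity)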
@@ -12,18 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # --------------------------------------------------------------------------
-import sys
-
 import logging
 
-logger = logging.getLogger(__name__)
-
-if sys.version_info < (3,):
-    from urllib2 import quote as url_quote
-else:
-    pass
-
-from . import HTTPError, HTTPResponse
+from . import HTTPResponse
 from .._serialization import _get_data_bytes_or_stream_only
+logger = logging.getLogger(__name__)
 
 
 class _HTTPClient(object):
@@ -35,7 +28,7 @@ class _HTTPClient(object):
    '''
    :param str protocol:
        http or https.
-   :param requests.Session request_session:
+   :param requests.Session session:
        session object created with requests library (or compatible).
    :param int timeout:
        timeout for the http request, in seconds.
@@ -78,9 +71,8 @@ class _HTTPClient(object):
         else:
             proxy_string = '{}:{}'.format(host, port)
 
-        self.proxies = {}
-        self.proxies['http'] = 'http://{}'.format(proxy_string)
-        self.proxies['https'] = 'https://{}'.format(proxy_string)
+        self.proxies = {'http': 'http://{}'.format(proxy_string),
+                        'https': 'https://{}'.format(proxy_string)}
 
     def perform_request(self, request):
         '''
@@ -149,7 +149,7 @@ def _convert_signed_identifiers_to_xml(signed_identifiers):
     if signed_identifiers is None:
         return ''
 
-    sis = ETree.Element('SignedIdentifiers');
+    sis = ETree.Element('SignedIdentifiers')
     for id, access_policy in signed_identifiers.items():
         # Root signed identifers element
         si = ETree.SubElement(sis, 'SignedIdentifier')
@@ -231,7 +231,7 @@ def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, co
     </Cors>
     </StorageServiceProperties>
     '''
-    service_properties_element = ETree.Element('StorageServiceProperties');
+    service_properties_element = ETree.Element('StorageServiceProperties')
 
     # Logging
     if logging:
@@ -342,10 +342,10 @@ def _len_plus(data):
 
     # If the stream is seekable and tell() is implemented, calculate the stream size.
     try:
-        currentPosition = data.tell()
+        current_position = data.tell()
         data.seek(0, SEEK_END)
-        length = data.tell() - currentPosition
-        data.seek(currentPosition, SEEK_SET)
+        length = data.tell() - current_position
+        data.seek(current_position, SEEK_SET)
     except (AttributeError, UnsupportedOperation):
         pass
 
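The renamed block measures the bytes remaining from the current position without consuming the stream; the same pattern stands alone like this:

    import io
    from io import SEEK_END, SEEK_SET

    # Measure bytes remaining from the current position, then restore the
    # position, as _len_plus does for seekable streams.
    def remaining_length(data):
        current_position = data.tell()
        data.seek(0, SEEK_END)
        length = data.tell() - current_position
        data.seek(current_position, SEEK_SET)
        return length

    stream = io.BytesIO(b'abcdef')
    stream.read(2)                      # advance past 'ab'
    assert remaining_length(stream) == 4
    assert stream.read() == b'cdef'     # position was restored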
@@ -50,27 +50,27 @@ class _Retry(object):
         :rtype: bool
         '''
         # If max attempts are reached, do not retry.
-        if (context.count >= self.max_attempts):
+        if context.count >= self.max_attempts:
             return False
 
         status = None
         if context.response and context.response.status:
             status = context.response.status
 
-        if status == None:
+        if status is None:
             '''
             If status is None, retry as this request triggered an exception. For
             example, network issues would trigger this.
             '''
             return True
-        elif status >= 200 and status < 300:
+        elif 200 <= status < 300:
             '''
             This method is called after a successful response, meaning we failed
             during the response body download or parsing. So, success codes should
             be retried.
             '''
             return True
-        elif status >= 300 and status < 500:
+        elif 300 <= status < 500:
             '''
             An exception occured, but in most cases it was expected. Examples could
             include a 309 Conflict or 412 Precondition Failed.
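Python's chained comparison evaluates as a single range test, which is what makes the rewritten conditions equivalent; briefly:

    # '200 <= status < 300' is evaluated as
    # '(200 <= status) and (status < 300)', matching the replaced form.
    for status, expect in [(200, True), (299, True), (300, False), (199, False)]:
        assert (200 <= status < 300) == (status >= 200 and status < 300) == expect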
@@ -555,7 +555,7 @@ class _QueryStringConstants(object):
     SIGNED_SERVICES = 'ss'
 
 
-class _SharedAccessHelper():
+class _SharedAccessHelper(object):
     def __init__(self):
         self.query_dict = {}
 
@@ -220,7 +220,7 @@ class StorageClient(object):
         _update_request(request)
         client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
 
-        while (True):
+        while True:
             try:
                 try:
                     # Execute the request callback
@@ -53,9 +53,9 @@ def _get_path(share_name=None, directory_name=None, file_name=None):
 def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
                                        end_range_required=True, check_content_md5=False):
     # If end range is provided, start range must be provided
-    if start_range_required == True or end_range is not None:
+    if start_range_required or end_range is not None:
         _validate_not_none('start_range', start_range)
-    if end_range_required == True:
+    if end_range_required:
         _validate_not_none('end_range', end_range)
 
     # Format based on whether end_range is present
@@ -66,7 +66,7 @@ def _validate_and_format_range_headers(request, start_range, end_range, start_ra
         request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
 
     # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5 == True:
+    if check_content_md5:
         if start_range is None or end_range is None:
             raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
         if end_range - start_range > 4 * 1024 * 1024:
@@ -41,6 +41,9 @@ from azure.storage.common._error import (
     _validate_not_none,
     _validate_key_encryption_key_wrap,
 )
+from ._error import (
+    _ERROR_MESSAGE_NOT_ENCRYPTED
+)
 
 
 def _encrypt_queue_message(message, key_encryption_key):
@@ -83,11 +86,10 @@ def _encrypt_queue_message(message, key_encryption_key):
     encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
 
     # Build the dictionary structure.
-    queue_message = {}
-    queue_message['EncryptedMessageContents'] = _encode_base64(encrypted_data)
-    queue_message['EncryptionData'] = _generate_encryption_data_dict(key_encryption_key,
+    queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
                                                                      content_encryption_key,
-                                                                     initialization_vector)
+                                                                     initialization_vector)}
 
     return dumps(queue_message)
 
@@ -115,7 +117,7 @@ def _decrypt_queue_message(message, require_encryption, key_encryption_key, reso
 
         encryption_data = _dict_to_encryption_data(message['EncryptionData'])
         decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError) as e:
+    except (KeyError, ValueError):
         # Message was not json formatted and so was not encrypted
         # or the user provided a json formatted message.
         if require_encryption:
@@ -124,7 +126,7 @@ def _decrypt_queue_message(message, require_encryption, key_encryption_key, reso
         return message
     try:
         return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception as e:
+    except Exception:
         raise AzureException(_ERROR_DECRYPTION_FAILURE)
 
 
@@ -21,7 +21,7 @@ from azure.storage.common._error import (
 _ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
 _ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
 _ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
+_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.'
 
 
 def _validate_message_type_text(param):
     if sys.version_info < (3,):
@@ -63,7 +63,7 @@ def _convert_queue_message_xml(message_text, encode_function, key_encryption_key
     <MessageText></MessageText>
     </QueueMessage>
     '''
-    queue_message_element = ETree.Element('QueueMessage');
+    queue_message_element = ETree.Element('QueueMessage')
 
     # Enabled
     message_text = encode_function(message_text)
@@ -163,15 +163,14 @@ def _convert_json_to_entity(entry_element, property_resolver, encrypted_properti
     entity['Timestamp'] = _from_entity_datetime(timestamp)
 
     for name, value in properties.items():
-        mtype = edmtypes.get(name);
+        mtype = edmtypes.get(name)
 
         # use the property resolver if present
         if property_resolver:
             # Clients are not expected to resolve these interal fields.
             # This check avoids unexpected behavior from the user-defined
             # property resolver.
-            if not (name == '_ClientEncryptionMetadata1' or \
-                    name == '_ClientEncryptionMetadata2'):
+            if not (name == '_ClientEncryptionMetadata1' or name == '_ClientEncryptionMetadata2'):
                 mtype = property_resolver(partition_key, row_key,
                                           name, value, mtype)
 
@@ -315,7 +314,7 @@ def _parse_batch_response(response):
 
 
 def _parse_batch_response_part(part):
-    lines = part.splitlines();
+    lines = part.splitlines()
 
     # First line is the HTTP status/reason
     status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
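The double partition in the context line pulls the status and reason out of an HTTP status line; standalone:

    # Split b'HTTP/1.1 204 No Content' with the same double partition:
    # the first partition drops 'HTTP/1.1', the second separates
    # status from reason.
    line = b'HTTP/1.1 204 No Content'
    status, _, reason = line.partition(b' ')[2].partition(b' ')
    assert int(status) == 204 and reason == b'No Content'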
@@ -20,6 +20,7 @@ from json import (
     loads,
 )
 
+from azure.common import AzureException
 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives.hashes import (
     Hash,
@@ -48,6 +49,7 @@ from azure.storage.common._error import (
 )
 from ._error import (
     _ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION,
+    _ERROR_ENTITY_NOT_ENCRYPTED
 )
 from .models import (
     Entity,
@@ -238,12 +240,12 @@ def _extract_encryption_metadata(entity, require_encryption, key_encryption_key,
         encrypted_properties_list = _decode_base64_to_bytes(entity['_ClientEncryptionMetadata2'])
         encryption_data = entity['_ClientEncryptionMetadata1']
         encryption_data = _dict_to_encryption_data(loads(encryption_data))
-    except Exception as e:
+    except Exception:
         # Message did not have properly formatted encryption metadata.
         if require_encryption:
             raise ValueError(_ERROR_ENTITY_NOT_ENCRYPTED)
         else:
-            return (None, None, None, None)
+            return None, None, None, None
 
     if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256):
         raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
@@ -252,8 +254,8 @@ def _extract_encryption_metadata(entity, require_encryption, key_encryption_key,
 
     # Special check for compatibility with Java V1 encryption protocol.
     isJavaV1 = (encryption_data.key_wrapping_metadata is None) or \
-               ((encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1) and \
-                'EncryptionLibrary' in encryption_data.key_wrapping_metadata and \
+               ((encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1) and
+                'EncryptionLibrary' in encryption_data.key_wrapping_metadata and
                 'Java' in encryption_data.key_wrapping_metadata['EncryptionLibrary'])
 
     metadataIV = _generate_property_iv(encryption_data.content_encryption_IV,
@@ -279,7 +281,7 @@ def _extract_encryption_metadata(entity, require_encryption, key_encryption_key,
     else:
         encrypted_properties_list = loads(encrypted_properties_list)
 
-    return (encryption_data.content_encryption_IV, encrypted_properties_list, content_encryption_key, isJavaV1)
+    return encryption_data.content_encryption_IV, encrypted_properties_list, content_encryption_key, isJavaV1
 
 
 def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1):
|
@@ -41,6 +41,7 @@ _ERROR_TOO_MANY_PROPERTIES = 'The entity contains more properties than allowed.'
 _ERROR_TYPE_NOT_SUPPORTED = 'Type not supported when sending data to the service: {0}.'
 _ERROR_VALUE_TOO_LARGE = '{0} is too large to be cast to type {1}.'
 _ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION = 'Encryption is only supported for not None strings.'
+_ERROR_ENTITY_NOT_ENCRYPTED = 'Entity was not encrypted.'
 
 
 def _validate_object_has_param(param_name, object):
@@ -62,11 +63,11 @@ def _validate_entity(entity, encrypt=None):
 
     # Two properties are added during encryption. Validate sufficient space
     max_properties = 255
-    if (encrypt):
+    if encrypt:
         max_properties = max_properties - 2
 
     # Validate there are not more than 255 properties including Timestamp
-    if (len(entity) > max_properties) or (len(entity) > (max_properties - 1) and not 'Timestamp' in entity):
+    if (len(entity) > max_properties) or (len(entity) == max_properties and 'Timestamp' not in entity):
         raise ValueError(_ERROR_TOO_MANY_PROPERTIES)
 
     # Validate the property names are not too long
|
@@ -78,7 +78,7 @@ def _insert_entity(entity, encryption_required=False,
         _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
         _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1]
     }
-    if (key_encryption_key):
+    if key_encryption_key:
         entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
     request.body = _get_request_body(_convert_entity_to_json(entity))
 
@@ -111,7 +111,7 @@ def _update_entity(entity, if_match, encryption_required=False,
         _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
         'If-Match': _to_str(if_match),
     }
-    if (key_encryption_key):
+    if key_encryption_key:
         entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
     request.body = _get_request_body(_convert_entity_to_json(entity))
 
@@ -170,7 +170,7 @@ def _insert_or_replace_entity(entity, require_encryption=False,
         _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
     }
 
-    if (key_encryption_key):
+    if key_encryption_key:
         entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
     request.body = _get_request_body(_convert_entity_to_json(entity))
 
@@ -187,9 +187,6 @@ def _insert_or_merge_entity(entity, require_encryption=False, key_encryption_key
         wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
         get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
         get_kid()--returns a string key id for this key-encryption-key.
-    :param function(partition_key, row_key, property_name) encryption_resolver:
-        A function that takes in an entities partition key, row key, and property name and returns
-        a boolean that indicates whether that property should be encrypted.
     '''
     _validate_entity(entity)
     _validate_encryption_unsupported(require_encryption, key_encryption_key)
@@ -227,10 +227,9 @@ def _convert_batch_to_json(batch_requests):
     batch_boundary = b'batch_' + _new_boundary()
     changeset_boundary = b'changeset_' + _new_boundary()
 
-    body = []
-    body.append(b'--' + batch_boundary + b'\n')
-    body.append(b'Content-Type: multipart/mixed; boundary=')
-    body.append(changeset_boundary + b'\n\n')
+    body = [b'--' + batch_boundary + b'\n',
+            b'Content-Type: multipart/mixed; boundary=',
+            changeset_boundary + b'\n\n']
 
     content_id = 1
 
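Joined, the list-literal form yields the OData batch preamble; a sketch with fixed boundaries standing in for the uuid-based ones:

    # What the list assembles, using fixed boundaries for readability.
    batch_boundary = b'batch_example'
    changeset_boundary = b'changeset_example'

    body = [b'--' + batch_boundary + b'\n',
            b'Content-Type: multipart/mixed; boundary=',
            changeset_boundary + b'\n\n']

    assert b''.join(body) == (b'--batch_example\n'
                              b'Content-Type: multipart/mixed; boundary=changeset_example\n\n')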
@@ -673,10 +673,10 @@ class TableService(StorageClient):
         '''
         Returns a generator to list the entities in the table specified. The
         generator will lazily follow the continuation tokens returned by the
-        service and stop when all entities have been returned or max_results is
+        service and stop when all entities have been returned or num_results is
         reached.
 
-        If max_results is specified and the account has more than that number of
+        If num_results is specified and the account has more than that number of
         entities, the generator will have a populated next_marker field once it
         finishes. This marker can be used to create a new generator if more
         results are desired.
@@ -747,16 +747,15 @@ class TableService(StorageClient):
             for more information on constructing filters.
         :param str select:
             Returns only the desired properties of an entity from the set.
-        :param int top:
+        :param int max_results:
             The maximum number of entities to return.
-        :param marker:
+        :param obj marker:
             A dictionary which identifies the portion of the query to be
             returned with the next query operation. The operation returns a
             next_marker element within the response body if the list returned
             was not complete. This value may then be used as a query parameter
             in a subsequent call to request the next portion of the list of
             table. The marker value is opaque to the client.
-        :type marker: obj
         :param str accept:
             Specifies the accepted content type of the response payload. See
             :class:`~azure.storage.table.models.TablePayloadFormat` for possible
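A hedged usage sketch of the lazy generator and marker described in the docstrings above; the method and parameter names (query_entities, num_results, marker, next_marker) are assumptions from the legacy SDK surface, and credentials are placeholders:

    from azure.storage.table import TableService

    # Names below are assumptions to verify against the installed version.
    table_service = TableService(account_name='myaccount', account_key='<key>')

    # The generator lazily follows continuation tokens up to num_results.
    entities = table_service.query_entities(
        'tasktable', filter="PartitionKey eq 'tasks'", num_results=100)
    for entity in entities:
        print(entity.RowKey)

    # If more results remain, next_marker seeds a follow-up query.
    if entities.next_marker:
        more = table_service.query_entities(
            'tasktable', filter="PartitionKey eq 'tasks'",
            num_results=100, marker=entities.next_marker)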
@@ -43,6 +43,9 @@ from azure.storage.common._error import (
     _ERROR_DECRYPTION_FAILURE,
     _ERROR_ENCRYPTION_REQUIRED,
 )
+from azure.storage.queue._error import (
+    _ERROR_MESSAGE_NOT_ENCRYPTED,
+)
 from azure.storage.queue import (
     QueueService,
 )