{Storage-Preview} `az storage blob access/directory/move`: Remove deprecated command groups, remove preview tags for commands that have been GA in the main repo (#8416)

* remove the deprecated command groups `az storage blob access`, `az storage blob directory`, and `az storage blob move`, since the `az storage fs` command group has been GA since 2021 (migration sketch after this list)

* `az storage account local-user`: only mark some params in `create/update` as preview, as the rest have been GA in the main repo (usage sketch after this list)

* remove `is_preview` tags for params that are already GA

* update version

* remove deprecated tests; also remove the vendored SDK, client factory, and help entries; fix lint

* remove CUSTOM_DATA_STORAGE_ADLS
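
For reference, the redirect targets recorded in commands.py point at the GA replacements: `az storage blob move` -> `az storage fs file move`, `az storage blob access` -> `az storage fs access`, and `az storage blob directory` -> `az storage fs directory`. A minimal migration sketch, assuming the standard `az storage fs` flags; the container, path, and account names are placeholders carried over from the removed help examples, so verify the exact options against `az storage fs --help`:

# Old commands (removed by this commit):
az storage blob directory create -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
az storage blob access show -b MyBlob -c MyContainer --account-name MyStorageAccount
az storage blob move -c MyContainer -d DestinationBlobPath -s SourceBlobPath --account-name MyStorageAccount

# GA equivalents under az storage fs (the file system corresponds to the container):
az storage fs directory create -f MyContainer -n MyDirectoryPath --account-name MyStorageAccount
az storage fs access show -f MyContainer -p MyBlob --account-name MyStorageAccount
az storage fs file move -f MyContainer -p SourceBlobPath --new-path MyContainer/DestinationBlobPath --account-name MyStorageAccount

For `az storage account local-user`, the group itself is no longer preview; only the newly added arguments keep `is_preview` (`group_id`, `allow_acl_authorization`, `extended_groups`, and `is_nfsv3_enabled` on `create`). A hedged usage sketch with hypothetical values; the long option names are inferred from the argument names in _params.py and should be checked with `az storage account local-user create --help`:

# --group-id, --extended-groups and --is-nfsv3-enabled are the options assumed to stay preview
az storage account local-user create --account-name MyStorageAccount -g MyResourceGroup -n user1 \
    --home-directory dir1 --group-id 2000 --extended-groups 3000 4000 --is-nfsv3-enabled true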
Zhiyi Huang 2025-01-15 12:35:49 +08:00 committed by GitHub
Parent: 5e4e7a4e7d
Commit: f5008af69e
No key found matching this signature
GPG key ID: B5690EEEBB952194
64 changed files with 31 additions and 32024 deletions


@ -3,6 +3,11 @@
Release History
===============
1.0.0b5
+++++++
* `az storage blob access/directory/move`: Remove deprecated command groups, since the `az storage fs` command group has been GA since 2021
* `az storage account local-user`: Only mark some params in `create/update` as preview, as the rest have been GA in the main repo
1.0.0b4
+++++++
* `az storage account create/update`: Support `--enable-extended-groups`


@ -8,7 +8,7 @@ from azure.cli.core.profiles import register_resource_type
from azure.cli.core.commands import AzCommandGroup, AzArgumentContext
import azext_storage_preview._help # pylint: disable=unused-import
from .profiles import CUSTOM_DATA_STORAGE, CUSTOM_MGMT_STORAGE, CUSTOM_DATA_STORAGE_ADLS, \
from .profiles import CUSTOM_DATA_STORAGE, CUSTOM_MGMT_STORAGE, \
CUSTOM_DATA_STORAGE_FILESHARE, CUSTOM_DATA_STORAGE_FILEDATALAKE, CUSTOM_DATA_STORAGE_BLOB
@ -17,7 +17,6 @@ class StorageCommandsLoader(AzCommandsLoader):
from azure.cli.core.commands import CliCommandType
register_resource_type('latest', CUSTOM_DATA_STORAGE, '2018-03-28')
register_resource_type('latest', CUSTOM_DATA_STORAGE_ADLS, '2019-02-02-preview')
register_resource_type('latest', CUSTOM_MGMT_STORAGE, '2023-05-01')
register_resource_type('latest', CUSTOM_DATA_STORAGE_FILESHARE, '2022-11-02')
register_resource_type('latest', CUSTOM_DATA_STORAGE_BLOB, '2022-11-02')


@ -96,19 +96,6 @@ def blob_data_service_factory(cli_ctx, kwargs):
token_credential=kwargs.pop('token_credential', None))
def adls_blob_data_service_factory(cli_ctx, kwargs):
from .sdkutil import get_adls_blob_service_by_type
blob_type = kwargs.get('blob_type')
blob_service = get_adls_blob_service_by_type(cli_ctx, blob_type) or get_adls_blob_service_by_type(cli_ctx, 'block')
return generic_data_service_factory(cli_ctx, blob_service, kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None),
socket_timeout=kwargs.pop('socket_timeout', None),
token_credential=kwargs.pop('token_credential', None))
def cloud_storage_account_service_factory(cli_ctx, kwargs):
t_cloud_storage_account = get_sdk(cli_ctx, CUSTOM_DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)


@ -210,193 +210,6 @@ helps['storage azcopy run-command'] = """
short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.
"""
helps['storage blob access'] = """
type: group
short-summary: Manage the access control properties of a blob when Hierarchical Namespace is enabled
"""
helps['storage blob access set'] = """
type: command
short-summary: Set the access control properties of a blob.
examples:
- name: Set the access control properties of a blob.
text: az storage blob access set -a "user::rwx,group::r--,other::---" -b MyBlob -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob access show'] = """
type: command
short-summary: Show the access control properties of a blob.
examples:
- name: Show the access control properties of a blob.
text: az storage blob access show -b MyBlob -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob access update'] = """
type: command
short-summary: Update the access control properties of a blob.
examples:
- name: Update the access permissions of a blob.
text: az storage blob access update --permissions "rwxrwxrwx" -b MyBlob -c MyContainer --account-name MyStorageAccount
- name: Update the owning user of a blob.
text: az storage blob access update --owner [entityId/UPN] -b MyBlob -c MyContainer --account-name MyStorageAccount
- name: Update the owning group of a blob.
text: az storage blob access update --group [entityId/UPN] -b MyBlob -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob move'] = """
type: command
short-summary: Move a blob in a storage container.
examples:
- name: Move a blob in a storage container.
text: az storage blob move -c MyContainer -d DestinationBlobPath -s SourceBlobPath --account-name MyStorageAccount
"""
helps['storage blob directory'] = """
type: group
short-summary: Manage blob directories in storage account container.
long-summary: To use the directory commands, please make sure your storage account type is StorageV2.
"""
helps['storage blob directory access'] = """
type: group
short-summary: Manage the access control properties of a directory when Hierarchical Namespace is enabled
"""
helps['storage blob directory access set'] = """
type: command
short-summary: Set the access control properties of a directory.
examples:
- name: Set the access control properties of a directory.
text: az storage blob directory access set -a "user::rwx,group::r--,other::---" -d MyDirectoryPath -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob directory access show'] = """
type: command
short-summary: Show the access control properties of a directory.
examples:
- name: Show the access control properties of a directory.
text: az storage blob directory access show -d MyDirectoryPath -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob directory access update'] = """
type: command
short-summary: Update the access control properties of a directory.
examples:
- name: Update the access permissions of a directory.
text: az storage blob directory access update --permissions "rwxrwxrwx" -d MyDirectoryPath -c MyContainer --account-name MyStorageAccount
- name: Update the owning user of a directory.
text: az storage blob directory access update --owner [entityId/UPN] -d MyDirectoryPath -c MyContainer --account-name MyStorageAccount
- name: Update the owning group of a directory.
text: az storage blob directory access update --group [entityId/UPN] -d MyDirectoryPath -c MyContainer --account-name MyStorageAccount
"""
helps['storage blob directory create'] = """
type: command
short-summary: Create a storage blob directory in a storage container.
long-summary: Create a storage blob directory which can contain other directories or blobs in a storage container.
examples:
- name: Create a storage blob directory in a storage container.
text: az storage blob directory create -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
- name: Create a storage blob directory with permissions and umask.
text: az storage blob directory create -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount --permissions rwxrwxrwx --umask 0000
"""
helps['storage blob directory delete'] = """
type: command
short-summary: Delete a storage blob directory in a storage container.
long-summary: >
This operation's behavior is different depending on whether Hierarchical Namespace
is enabled; if yes, then the delete operation can be atomic and instantaneous;
if not, the operation is performed in batches and a continuation token could be returned.
examples:
- name: Delete a storage blob directory in a storage container.
text: az storage blob directory delete -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory download'] = """
type: command
short-summary: Download blobs to a local file path.
examples:
- name: Download a single blob in a storage blob directory.
text: az storage blob directory download -c MyContainer --account-name MyStorageAccount -s "path/to/blob" -d "<local-path>"
- name: Download the entire directory in a storage container.
text: az storage blob directory download -c MyContainer --account-name MyStorageAccount -s SourceDirectoryPath -d "<local-path>" --recursive
- name: Download an entire subdirectory of a storage blob directory.
text: az storage blob directory download -c MyContainer --account-name MyStorageAccount -s "path/to/subdirectory" -d "<local-path>" --recursive
"""
helps['storage blob directory exists'] = """
type: command
short-summary: Check for the existence of a blob directory in a storage container.
examples:
- name: Check for the existence of a blob directory in a storage container.
text: az storage blob directory exists -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory list'] = """
type: command
short-summary: List blobs and blob subdirectories in a storage directory.
examples:
- name: List blobs and blob subdirectories in a storage directory.
text: az storage blob directory list -c MyContainer -d DestinationDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory metadata'] = """
type: group
short-summary: Manage directory metadata.
"""
helps['storage blob directory metadata show'] = """
type: command
short-summary: Show all user-defined metadata for the specified blob directory.
examples:
- name: Show all user-defined metadata for the specified blob directory.
text: az storage blob directory metadata show -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory metadata update'] = """
type: command
short-summary: Set user-defined metadata for the specified blob directory as one or more name-value pairs.
examples:
- name: Set user-defined metadata for the specified blob directory as one or more name-value pairs.
text: az storage blob directory metadata update --metadata tag1=value1 -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory move'] = """
type: command
short-summary: Move a storage directory to another storage blob directory in a storage container.
long-summary: >
Move a storage directory and all its content (which can contain other directories or blobs) to another storage
blob directory in a storage container. This operation's behavior is different depending on whether Hierarchical
Namespace is enabled; if yes, the move operation is atomic and no marker is returned; if not, the operation is
performed in batches and a continuation token could be returned.
examples:
- name: Move a storage directory to another storage blob directory in a storage container.
text: az storage blob directory move -c MyContainer -d my-new-directory -s dir --account-name MyStorageAccount
- name: Move a storage subdirectory to another storage blob directory in a storage container.
text: az storage blob directory move -c MyContainer -d my-new-directory -s dir/subdirectory --account-name MyStorageAccount
"""
helps['storage blob directory show'] = """
type: command
short-summary: Show a storage blob directory properties in a storage container.
examples:
- name: Show a storage blob directory properties in a storage container.
text: az storage blob directory show -c MyContainer -d MyDirectoryPath --account-name MyStorageAccount
"""
helps['storage blob directory upload'] = """
type: command
short-summary: Upload blobs or subdirectories to a storage blob directory.
examples:
- name: Upload a single blob to a storage blob directory.
text: az storage blob directory upload -c MyContainer --account-name MyStorageAccount -s "path/to/file" -d directory
- name: Upload a local directory to a storage blob directory.
text: az storage blob directory upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory" -d directory --recursive
- name: Upload a set of files in a local directory to a storage blob directory.
text: az storage blob directory upload -c MyContainer --account-name MyStorageAccount -s "path/to/file*" -d directory --recursive
"""
helps['storage file'] = """
type: group
short-summary: Manage file shares that use the SMB 3.0 protocol.


@ -84,11 +84,10 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
sam_account_name_type = CLIArgumentType(min_api='2021-08-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory SAMAccountName for Azure Storage.",
is_preview=True)
help="Specify the Active Directory SAMAccountName for Azure Storage.")
t_account_type = self.get_models('ActiveDirectoryPropertiesAccountType', resource_type=CUSTOM_MGMT_STORAGE)
account_type_type = CLIArgumentType(min_api='2021-08-01', arg_group="Azure Active Directory Properties",
arg_type=get_enum_type(t_account_type), is_preview=True,
arg_type=get_enum_type(t_account_type),
help="Specify the Active Directory account type for Azure Storage.")
t_routing_choice = self.get_models('RoutingChoice', resource_type=CUSTOM_MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
@ -159,7 +158,7 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
resource_type=CUSTOM_MGMT_STORAGE
)
dns_endpoint_type_type = CLIArgumentType(
arg_type=get_enum_type(dns_endpoint_type_enum), is_preview=True,
arg_type=get_enum_type(dns_endpoint_type_enum),
options_list=['--dns-endpoint-type', '--endpoint'], min_api='2021-09-01',
help='Allow you to specify the type of endpoint. Set this to AzureDNSZone to create a large number of '
'accounts in a single subscription, which creates accounts in an Azure DNS Zone and the endpoint URL '
@ -228,9 +227,9 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_sftp', arg_type=get_three_state_flag(), min_api='2021-08-01',
is_preview=True, help='Enable Secure File Transfer Protocol.')
help='Enable Secure File Transfer Protocol.')
c.argument('enable_local_user', arg_type=get_three_state_flag(), min_api='2021-08-01',
is_preview=True, help='Enable local user features.')
help='Enable local user features.')
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_files_aadkerb', aadkerb_type)
@ -286,7 +285,7 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True)
c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type)
c.argument('default_share_permission', default_share_permission_type)
c.argument('enable_nfs_v3', arg_type=get_three_state_flag(), is_preview=True, min_api='2021-01-01',
c.argument('enable_nfs_v3', arg_type=get_three_state_flag(), min_api='2021-01-01',
help='NFS 3.0 protocol support enabled if sets to true.')
c.argument('enable_alw', arg_type=get_three_state_flag(), min_api='2021-06-01',
help='The account level immutability property. The property is immutable and can only be set to true'
@ -315,7 +314,7 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.argument('public_network_access', arg_type=get_enum_type(public_network_access_enum), min_api='2021-06-01',
help='Enable or disable public network access to the storage account. '
'Possible values include: `Enabled` or `Disabled`.')
c.argument('enable_extended_groups', arg_type=get_three_state_flag(),
c.argument('enable_extended_groups', arg_type=get_three_state_flag(), is_preview=True,
help='Enable extended group support with local users feature, if set to true.')
with self.argument_context('storage account update', resource_type=CUSTOM_MGMT_STORAGE) as c:
@ -333,9 +332,9 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_sftp', arg_type=get_three_state_flag(), min_api='2021-08-01',
is_preview=True, help='Enable Secure File Transfer Protocol.')
help='Enable Secure File Transfer Protocol.')
c.argument('enable_local_user', arg_type=get_three_state_flag(), min_api='2021-08-01',
is_preview=True, help='Enable local user features.')
help='Enable local user features.')
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_files_aadkerb', aadkerb_type)
@ -894,24 +893,24 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
for cmd in ['list-handle', 'close-handle']:
with self.argument_context('storage share ' + cmd) as c:
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False, is_preview=True,
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False,
help="If true, the trailing dot will be trimmed from the target URI. Default to False")
for cmd in ['create', 'delete', 'show', 'exists', 'metadata show', 'metadata update', 'list']:
with self.argument_context('storage directory ' + cmd) as c:
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False, is_preview=True,
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False,
help="If true, the trailing dot will be trimmed from the target URI. Default to False")
for cmd in ['list', 'delete', 'delete-batch', 'resize', 'url', 'generate-sas', 'show', 'update',
'exists', 'metadata show', 'metadata update', 'copy start', 'copy cancel', 'copy start-batch',
'upload', 'upload-batch', 'download', 'download-batch']:
with self.argument_context('storage file ' + cmd) as c:
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False, is_preview=True,
c.extra('disallow_trailing_dot', arg_type=get_three_state_flag(), default=False,
help="If true, the trailing dot will be trimmed from the target URI. Default to False")
for cmd in ['start', 'start-batch']:
with self.argument_context('storage file copy ' + cmd) as c:
c.extra('disallow_source_trailing_dot', arg_type=get_three_state_flag(), default=False, is_preview=True,
c.extra('disallow_source_trailing_dot', arg_type=get_three_state_flag(), default=False,
options_list=["--disallow-source-trailing-dot", "--disallow-src-trailing"],
help="If true, the trailing dot will be trimmed from the source URI. Default to False")
@ -947,15 +946,15 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
help='Indicates whether ssh key exists. Set it to false to remove existing SSH key.')
c.argument('has_ssh_password', arg_type=get_three_state_flag(),
help='Indicates whether ssh password exists. Set it to false to remove existing SSH password.')
c.argument('group_id',
c.argument('group_id', is_preview=True,
help='An identifier for associating a group of users.')
c.argument('allow_acl_authorization', options_list=['--allow-acl-authorization', '--allow-acl-auth'],
arg_type=get_three_state_flag(),
arg_type=get_three_state_flag(), is_preview=True,
help='Indicate whether ACL authorization is allowed for this user. '
'Set it to false to disallow using ACL authorization.')
c.argument('extended_groups', nargs='+',
c.argument('extended_groups', nargs='+', is_preview=True,
help='Supplementary group membership. Only applicable for local users enabled for NFSv3 access.')
with self.argument_context('storage account local-user create') as c:
c.argument('is_nfsv3_enabled', arg_type=get_three_state_flag(),
c.argument('is_nfsv3_enabled', arg_type=get_three_state_flag(), is_preview=True,
help='Indicate if the local user is enabled for access with NFSv3 protocol.')


@ -5,9 +5,9 @@
from azure.cli.core.commands import CliCommandType
from azure.cli.core.commands.arm import show_exception_handler
from ._client_factory import (cf_sa, blob_data_service_factory, adls_blob_data_service_factory,
from ._client_factory import (cf_sa, blob_data_service_factory,
cf_share_client, cf_share_file_client, cf_share_directory_client, cf_local_users)
from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_DATA_STORAGE_ADLS, CUSTOM_MGMT_STORAGE,
from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_MGMT_STORAGE,
CUSTOM_DATA_STORAGE_FILESHARE)
@ -57,85 +57,14 @@ def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-st
with self.command_group('storage azcopy', custom_command_type=get_custom_sdk('azcopy', None)) as g:
g.custom_command('run-command', 'storage_run_command', validator=lambda namespace: None)
# pylint: disable=line-too-long
adls_base_blob_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_adls_storage_preview.blob.baseblobservice'
'#BaseBlobService.{}',
client_factory=adls_blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE_ADLS)
def _adls_deprecate_message(self):
msg = "This {} has been deprecated and will be removed in future release.".format(self.object_type)
msg += " Use '{}' instead.".format(self.redirect)
msg += " For more information go to"
msg += " https://github.com/Azure/azure-cli/blob/dev/src/azure-cli/azure/cli/command_modules/storage/docs/ADLS%20Gen2.md"
return msg
# New Blob Commands
with self.command_group('storage blob', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('move', 'rename_path', is_preview=True,
deprecate_info=self.deprecate(redirect="az storage fs file move", hide=True,
message_func=_adls_deprecate_message))
with self.command_group('storage blob access', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS,
deprecate_info=self.deprecate(redirect="az storage fs access", hide=True,
message_func=_adls_deprecate_message)) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
# TODO: Remove them after deprecate for two sprints
# Blob directory Commands Group
with self.command_group('storage blob directory', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS, is_preview=True) as g:
from ._format import transform_blob_output
from ._transformers import (transform_storage_list_output, create_boolean_result_output_transformer)
g.storage_command_oauth('create', 'create_directory')
g.storage_command_oauth('delete', 'delete_directory')
g.storage_custom_command_oauth('move', 'rename_directory')
g.storage_custom_command_oauth('show', 'show_directory', table_transformer=transform_blob_output,
exception_handler=show_exception_handler)
g.storage_custom_command_oauth('list', 'list_directory', transform=transform_storage_list_output,
table_transformer=transform_blob_output)
g.storage_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists'))
g.storage_command_oauth(
'metadata show', 'get_blob_metadata', exception_handler=show_exception_handler)
g.storage_command_oauth('metadata update', 'set_blob_metadata')
with self.command_group('storage blob directory', is_preview=True,
custom_command_type=get_custom_sdk('azcopy', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS))as g:
g.storage_custom_command_oauth('upload', 'storage_blob_upload')
g.storage_custom_command_oauth('download', 'storage_blob_download')
with self.command_group('storage blob directory access', command_type=adls_base_blob_sdk, is_preview=True,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
with self.command_group('storage blob directory',
deprecate_info=self.deprecate(redirect="az storage fs directory", hide=True,
message_func=_adls_deprecate_message)) as g:
pass
share_client_sdk = CliCommandType(
operations_tmpl='azure.multiapi.storagev2.fileshare._share_client#ShareClient.{}',
client_factory=cf_share_client,
resource_type=CUSTOM_DATA_STORAGE_FILESHARE)
directory_client_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_storagev2.fileshare._directory_client#ShareDirectoryClient.{}',
operations_tmpl='azext_storage_preview.vendored_sdks.azure_storagev2.fileshare._directory_client'
'#ShareDirectoryClient.{}',
client_factory=cf_share_directory_client,
resource_type=CUSTOM_DATA_STORAGE_FILESHARE)
@ -155,7 +84,7 @@ def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-st
with self.command_group('storage directory', command_type=directory_client_sdk,
resource_type=CUSTOM_DATA_STORAGE_FILESHARE,
custom_command_type=get_custom_sdk('directory', cf_share_directory_client)) as g:
from ._transformers import transform_share_directory_json_output
from ._transformers import transform_share_directory_json_output, create_boolean_result_output_transformer
from ._format import transform_file_directory_result, transform_file_output, transform_boolean_for_table
g.storage_custom_command_oauth('create', 'create_directory',
transform=create_boolean_result_output_transformer('created'),
@ -229,7 +158,7 @@ def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-st
with self.command_group('storage account local-user', local_users_sdk,
custom_command_type=local_users_custom_type,
resource_type=CUSTOM_MGMT_STORAGE, min_api='2021-08-01', is_preview=True) as g:
resource_type=CUSTOM_MGMT_STORAGE, min_api='2021-08-01') as g:
g.custom_command('create', 'create_local_user')
g.custom_command('update', 'update_local_user')
g.command('delete', 'delete')


@ -7,7 +7,6 @@ from azure.cli.core.profiles import CustomResourceType
CUSTOM_DATA_STORAGE = CustomResourceType('azext_storage_preview.vendored_sdks.azure_storage', None)
CUSTOM_DATA_STORAGE_ADLS = CustomResourceType('azext_storage_preview.vendored_sdks.azure_adls_storage_preview', None)
CUSTOM_MGMT_STORAGE = CustomResourceType('azext_storage_preview.vendored_sdks.azure_mgmt_storage',
'StorageManagementClient')
CUSTOM_DATA_STORAGE_FILESHARE = CustomResourceType('azext_storage_preview.vendored_sdks.azure_storagev2.fileshare',


@ -7,7 +7,7 @@
from azure.cli.core.profiles import get_sdk, supported_api_version, ResourceType
from azure.cli.core.profiles._shared import APIVersionException
from .profiles import CUSTOM_DATA_STORAGE, CUSTOM_DATA_STORAGE_ADLS
from .profiles import CUSTOM_DATA_STORAGE
def cosmosdb_table_exists(cli_ctx):
@ -37,19 +37,6 @@ def get_blob_service_by_type(cli_ctx, blob_type):
return None
def get_adls_blob_service_by_type(cli_ctx, blob_type):
type_to_service = {
'block': lambda ctx: get_sdk(ctx, CUSTOM_DATA_STORAGE_ADLS, 'BlockBlobService', mod='blob'),
'page': lambda ctx: get_sdk(ctx, CUSTOM_DATA_STORAGE_ADLS, 'PageBlobService', mod='blob'),
'append': lambda ctx: get_sdk(ctx, CUSTOM_DATA_STORAGE_ADLS, 'AppendBlobService', mod='blob')
}
try:
return type_to_service[blob_type](cli_ctx)
except KeyError:
return None
def get_blob_types():
return 'block', 'page', 'append'

File diff not shown because it is too large.

File diff hidden because one or more lines are too long.

File diff not shown because it is too large.


@ -1,320 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
import time
from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, ScenarioTest,
JMESPathCheck, api_version_constraint, StorageAccountPreparer)
from .storage_test_util import StorageScenarioMixin, StorageTestFilesPreparer
from ...profiles import CUSTOM_MGMT_STORAGE, CUSTOM_DATA_STORAGE_FILEDATALAKE
class StorageADLSTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(CUSTOM_MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer()
@StorageAccountPreparer(name_prefix='clitestaldsaccount', kind='StorageV2', hns=True)
def test_storage_adls_blob(self, resource_group, storage_account_info):
account_info = storage_account_info
self.kwargs.update({
'sc': account_info[0],
'rg': resource_group
})
container = self.create_container(account_info)
directory = 'testdirectory'
# Create a storage blob directory and check its existence
self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('exists', False))
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory)
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, directory)\
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage fs file list -f {}', account_info, container) \
.assert_with_checks(JMESPathCheck('length(@)', 1)) \
.assert_with_checks(JMESPathCheck('[0].isDirectory', True))
self.storage_cmd('storage blob directory show -c {} -d {} ', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('metadata.hdi_isfolder', "true"))
self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('permissions', "rwxr-x---"))
# Argument validation: Throw error when using existing directory name
with self.assertRaises(SystemExit):
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory)
# Create a storage blob directory with permissions
directory2 = 'testdirectory2'
self.storage_cmd('storage blob directory create -c {} -d {} --permissions rwxrwxrwx --umask 0000',
account_info, container, directory2)
self.storage_cmd('storage blob directory show -c {} -d {} ', account_info, container, directory2) \
.assert_with_checks(JMESPathCheck('metadata.hdi_isfolder', "true"))
self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container, directory2) \
.assert_with_checks(JMESPathCheck('permissions', "rwxrwxrwx"))
# Storage blob access control
local_file = self.create_temp_file(128)
blob = self.create_random_name('blob', 24)
self.storage_cmd('storage blob upload -c {} -f "{}" -n {}', account_info, container, local_file, blob)
acl = "user::rwx,group::r--,other::---"
self.storage_cmd('storage blob access set -c {} -b {} -a "{}"', account_info, container, blob, acl)
self.storage_cmd('storage blob access show -c {} -b {}', account_info, container, blob) \
.assert_with_checks(JMESPathCheck('acl', acl))
self.storage_cmd('storage blob access update -c {} -b {} --permissions "rwxrwxrwx"', account_info,
container, blob, acl)
self.storage_cmd('storage blob access show -c {} -b {}', account_info, container, blob)\
.assert_with_checks(JMESPathCheck('permissions', "rwxrwxrwx"))
# Storage blob directory access control
acl = "user::rwx,group::r--,other::---"
self.storage_cmd('storage blob directory access set -c {} -d {} -a "{}"', account_info, container, directory, acl)
self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('acl', acl))
self.storage_cmd('storage blob directory access update -c {} -d {} --permissions "rwxrwxrwx"', account_info,
container, directory, acl)
self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container,
directory).assert_with_checks(JMESPathCheck('permissions', "rwxrwxrwx"))
# Storage blob directory metadata
self.storage_cmd('storage blob directory metadata update -c {} -d {} --metadata "tag1=value1"', account_info,
container, directory)
self.storage_cmd('storage blob directory metadata show -c {} -d {} ', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('tag1', "value1"))
# Remove blob directory
self.storage_cmd('storage blob directory delete -c {} -d {} --recursive', account_info,
container, directory, directory)
self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('exists', False))
class StorageADLSDirectoryMoveTests(StorageScenarioMixin, LiveScenarioTest):
@api_version_constraint(CUSTOM_MGMT_STORAGE, min_api='2018-02-01')
@StorageTestFilesPreparer()
@ResourceGroupPreparer()
def test_storage_adls_blob_directory_move(self, resource_group, test_dir):
storage_account = self.create_random_name(prefix='clitestaldsaccount', length=24)
self.kwargs.update({
'sc': storage_account,
'rg': resource_group
})
self.cmd('storage account create -n {sc} -g {rg} -l centralus --kind StorageV2 --hierarchical-namespace true '
' --https-only')
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
directory = 'dir'
des_directory = 'dir1'
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory)
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'apple'))
# Move from a directory to a nonexistent directory
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', False))
self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info,
container, des_directory, directory)
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('exists', False))
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('length(@)', 11))
# Test directory name contains Spaces
contain_space_dir = 'test move directory'
# Move directory to contain_space_dir
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, contain_space_dir) \
.assert_with_checks(JMESPathCheck('exists', False))
self.storage_cmd('storage blob directory move -c "{}" -d "{}" -s "{}"', account_info, container,
contain_space_dir, des_directory)
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, contain_space_dir) \
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', False))
# Move contain_space_dir back to directory
self.storage_cmd('storage blob directory move -c "{}" -d "{}" -s "{}"', account_info, container,
des_directory, contain_space_dir)
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container, contain_space_dir) \
.assert_with_checks(JMESPathCheck('exists', False))
# Move from a directory to a existing empty directory
directory2 = 'dir2'
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory2)
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', True))
self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info,
container, directory2, des_directory)
self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('exists', False))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory2) \
.assert_with_checks(JMESPathCheck('length(@)', 12))
# Move from a directory to a existing nonempty directory with mode "legacy"
directory3 = 'dir3'
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory3)
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}"', account_info, container, directory3,
os.path.join(test_dir, 'readme'))
self.cmd('storage blob directory move -c {} -d {} -s {} --account-name {} --move-mode legacy'.format(
container, directory3, directory2, storage_account), expect_failure=True)
# Move from a directory to a existing nonempty directory with mode "posix"
self.storage_cmd('storage blob directory move -c {} -d {} -s {} --move-mode posix', account_info,
container, directory3, directory2)
self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container,
'/'.join([directory3, directory2])) \
.assert_with_checks(JMESPathCheck('exists', True))
# Move from a subdirectory to a new directory with mode "posix"
directory4 = "dir4"
self.storage_cmd('storage blob directory move -c {} -d {} -s {} --move-mode posix', account_info,
container, directory4, '/'.join([directory3, directory2]))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory4) \
.assert_with_checks(JMESPathCheck('length(@)', 12))
# Argument validation: Throw error when source path is blob name
with self.assertRaises(SystemExit):
self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info,
container, directory4, '/'.join([directory3, 'readme']))
class StorageADLSMoveTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(CUSTOM_MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location="centralus")
@StorageAccountPreparer(name_prefix='clitestaldsaccount', kind='StorageV2', hns=True, location='centralus')
def test_storage_adls_blob_move(self, resource_group, storage_account_info):
account_info = storage_account_info
self.kwargs.update({
'sc': account_info[0],
'rg': resource_group
})
container = self.create_container(account_info)
directory = 'dir'
des_directory = 'dir1'
local_file = self.create_temp_file(128)
blob = self.create_random_name('blob', 24)
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory)
self.storage_cmd('storage blob upload -c {} -f "{}" -n {}', account_info, container, local_file,
'/'.join([directory, blob]))
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, des_directory)
# Move a blob between different directory in a container
self.storage_cmd('storage blob move -c {} -d {} -s {}', account_info,
container, '/'.join([des_directory, blob]), '/'.join([directory, blob]))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('length(@)', 1))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('length(@)', 0))
# Move a blob in a directory
new_blob = self.create_random_name('blob', 24)
self.storage_cmd('storage blob move -c {} -d {} -s {}', account_info,
container, '/'.join([des_directory, new_blob]), '/'.join([des_directory, blob]))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, des_directory) \
.assert_with_checks(JMESPathCheck('[0].name', '/'.join([des_directory, new_blob])))
with self.assertRaises(SystemExit):
self.storage_cmd('storage blob move -c {} -d {} -s {}', account_info,
container, blob, des_directory)
class StorageADLSDirectoryUploadTests(StorageScenarioMixin, LiveScenarioTest):
@api_version_constraint(CUSTOM_MGMT_STORAGE, min_api='2018-02-01')
@StorageTestFilesPreparer()
@ResourceGroupPreparer()
def test_storage_adls_blob_directory_upload(self, resource_group, test_dir):
storage_account = self.create_random_name(prefix='clitestaldsaccount', length=24)
self.kwargs.update({
'sc': storage_account,
'rg': resource_group
})
self.cmd('storage account create -n {sc} -g {rg} --kind StorageV2 --hierarchical-namespace true --https-only')
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
directory = 'dir'
self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container, directory)
# Upload a single blob to the blob directory
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}"', account_info, container, directory,
os.path.join(test_dir, 'readme'))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('length(@)', 1))
# Upload a local directory to the blob directory
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'apple'))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('length(@)', 12))
self.storage_cmd('storage blob directory list -c {} -d {} --num-results 9', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('length(@)', 9))
# Upload files in a local directory to the blob directory
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'butter/file_*'))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, directory) \
.assert_with_checks(JMESPathCheck('length(@)', 22))
# Upload files in a local directory to the blob directory
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'butter/file_*'))
# Upload files in a local directory to the blob subdirectory
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
'/'.join([directory, 'subdir']), os.path.join(test_dir, 'butter/file_*'))
self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container, '/'.join([directory, 'subdir'])) \
.assert_with_checks(JMESPathCheck('length(@)', 10))
# Argument validation: Throw error when source path is blob name
with self.assertRaises(SystemExit):
self.cmd('storage blob directory upload -c {} -d {} -s {} --account-name {}'.format(
container, '/'.join([directory, 'readme']), test_dir, storage_account))
class StorageADLSDirectoryDownloadTests(StorageScenarioMixin, LiveScenarioTest):
@api_version_constraint(CUSTOM_MGMT_STORAGE, min_api='2018-02-01')
@StorageTestFilesPreparer()
@ResourceGroupPreparer()
def test_storage_adls_blob_directory_download(self, resource_group, test_dir):
storage_account = self.create_random_name(prefix='clitestaldsaccount', length=24)
self.kwargs.update({
'sc': storage_account,
'rg': resource_group
})
self.cmd('storage account create -n {sc} -g {rg} --kind StorageV2 --hierarchical-namespace true --https-only ')
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
directory = 'dir'
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'readme'))
self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
directory, os.path.join(test_dir, 'apple'))
local_folder = self.create_temp_dir()
# Download a single file
self.storage_cmd('storage blob directory download -c {} -s "{}" -d "{}" --recursive', account_info, container,
'/'.join([directory, 'readme']), local_folder)
self.assertEqual(1, sum(len(f) for r, d, f in os.walk(local_folder)))
# Download entire directory
self.storage_cmd('storage blob directory download -c {} -s {} -d "{}" --recursive', account_info, container,
directory, local_folder)
self.assertEqual(2, sum(len(d) for r, d, f in os.walk(local_folder)))
self.assertEqual(12, sum(len(f) for r, d, f in os.walk(local_folder)))
# Download an entire subdirectory of a storage blob directory.
self.storage_cmd('storage blob directory download -c {} -s {} -d "{}" --recursive', account_info, container,
'/'.join([directory, 'apple']), local_folder)
self.assertEqual(3, sum(len(d) for r, d, f in os.walk(local_folder)))
if __name__ == '__main__':
unittest.main()


@ -1,6 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
__import__('pkg_resources').declare_namespace(__name__)


@ -1,4 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------


@ -1,37 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .appendblobservice import AppendBlobService
from .blockblobservice import BlockBlobService
from .models import (
Container,
ContainerProperties,
Blob,
BlobProperties,
BlobBlock,
BlobBlockList,
PageRange,
ContentSettings,
CopyProperties,
ContainerPermissions,
BlobPermissions,
_LeaseActions,
AppendBlockProperties,
PageBlobProperties,
ResourceProperties,
Include,
SequenceNumberAction,
BlockListType,
PublicAccess,
BlobPrefix,
DeleteSnapshot,
BatchDeleteSubRequest,
BatchSetBlobTierSubRequest,
BatchSubResponse,
CustomerProvidedEncryptionKey,
RehydratePriority,
)
from .pageblobservice import PageBlobService
from ._constants import __version__


@ -1,17 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '2.1.0'
# x-ms-version for storage service.
X_MS_VERSION = '2019-02-02'
# internal configurations, should not be changed
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
_BLOB_SERVICE_PUBLIC_CLOUD_HOST = 'blob.core.windows.net'
_DFS_SERVICE_PUBLIC_CLOUD_HOST = 'dfs.core.windows.net'


@ -1,667 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.common import AzureException
from dateutil import parser
from ..common._http import HTTPResponse
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from ..common._common_conversion import (
_decode_base64_to_text,
_to_str,
_get_content_md5
)
from ..common._deserialization import (
_parse_properties,
_to_int,
_parse_metadata,
_convert_xml_to_signed_identifiers,
_bool,
)
from .models import (
Container,
Blob,
BlobBlock,
BlobBlockList,
BlobBlockState,
BlobProperties,
PageRange,
ContainerProperties,
AppendBlockProperties,
PageBlobProperties,
ResourceProperties,
BlobPrefix,
AccountInformation,
BatchSubResponse,
UserDelegationKey,
PathProperties,
)
from ._encryption import _decrypt_blob
from ..common.models import _list
from ..common._error import (
_validate_content_match,
_ERROR_DECRYPTION_FAILURE,
)
from io import BytesIO
_HTTP_LINE_ENDING = "\r\n"
def _parse_cpk_headers(response, properties):
server_encrypted = response.headers.get('x-ms-request-server-encrypted')
if server_encrypted is not None:
properties.request_server_encrypted = _bool(server_encrypted)
properties.encryption_key_sha256 = response.headers.get('x-ms-encryption-key-sha256')
def _parse_base_properties(response):
'''
Extracts basic response headers.
'''
resource_properties = ResourceProperties()
resource_properties.last_modified = parser.parse(response.headers.get('last-modified'))
resource_properties.etag = response.headers.get('etag')
_parse_cpk_headers(response, resource_properties)
return resource_properties
def _parse_page_properties(response):
'''
Extracts page response headers.
'''
put_page = PageBlobProperties()
put_page.last_modified = parser.parse(response.headers.get('last-modified'))
put_page.etag = response.headers.get('etag')
put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number'))
_parse_cpk_headers(response, put_page)
return put_page
def _parse_append_block(response):
'''
Extracts append block response headers.
'''
append_block = AppendBlockProperties()
append_block.last_modified = parser.parse(response.headers.get('last-modified'))
append_block.etag = response.headers.get('etag')
append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset'))
append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count'))
_parse_cpk_headers(response, append_block)
return append_block
def _parse_snapshot_blob(response, name):
'''
Extracts snapshot return header.
'''
snapshot = response.headers.get('x-ms-snapshot')
return _parse_blob(response, name, snapshot)
def _parse_lease(response):
'''
Extracts lease time and ID return headers.
'''
lease = {'time': response.headers.get('x-ms-lease-time')}
if lease['time']:
lease['time'] = _to_int(lease['time'])
lease['id'] = response.headers.get('x-ms-lease-id')
return lease
def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, BlobProperties)
# For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
content_settings = getattr(props, 'content_settings')
if 'content-range' in response.headers:
if 'x-ms-blob-content-md5' in response.headers:
setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
else:
delattr(content_settings, 'content_md5')
if validate_content:
computed_md5 = _get_content_md5(response.body)
_validate_content_match(response.headers['content-md5'], computed_md5)
if key_encryption_key is not None or key_resolver_function is not None:
try:
response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
response, start_offset, end_offset)
except:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
return Blob(name, snapshot, response.body, props, metadata)
def _parse_container(response, name):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, ContainerProperties)
return Container(name, props, metadata)
def _convert_xml_to_signed_identifiers_and_access(response):
acl = _convert_xml_to_signed_identifiers(response)
acl.public_access = response.headers.get('x-ms-blob-public-access')
return acl
def _convert_xml_to_containers(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Containers>
<Container>
<Name>container-name</Name>
<Properties>
<Last-Modified>date/time-value</Last-Modified>
<Etag>etag</Etag>
<LeaseStatus>locked | unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<PublicAccess>blob | container</PublicAccess>
<HasImmutabilityPolicy>true | false</HasImmutabilityPolicy>
<HasLegalHold>true | false</HasLegalHold>
</Properties>
<Metadata>
<metadata-name>value</metadata-name>
</Metadata>
</Container>
</Containers>
<NextMarker>marker-value</NextMarker>
</EnumerationResults>
'''
if response is None or response.body is None:
return None
containers = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
containers_element = list_element.find('Containers')
for container_element in containers_element.findall('Container'):
# Name element
container = Container()
container.name = container_element.findtext('Name')
# Metadata
metadata_root_element = container_element.find('Metadata')
if metadata_root_element is not None:
container.metadata = dict()
for metadata_element in metadata_root_element:
container.metadata[metadata_element.tag] = metadata_element.text
# Properties
properties_element = container_element.find('Properties')
container.properties.etag = properties_element.findtext('Etag')
container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
container.properties.lease_status = properties_element.findtext('LeaseStatus')
container.properties.lease_state = properties_element.findtext('LeaseState')
container.properties.lease_duration = properties_element.findtext('LeaseDuration')
container.properties.public_access = properties_element.findtext('PublicAccess')
container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy')
container.properties.has_legal_hold = properties_element.findtext('HasLegalHold')
# Add container to list
containers.append(container)
return containers
LIST_BLOBS_ATTRIBUTE_MAP = {
'Last-Modified': (None, 'last_modified', parser.parse),
'Etag': (None, 'etag', _to_str),
'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int),
'BlobType': (None, 'blob_type', _to_str),
'Content-Length': (None, 'content_length', _to_int),
'ServerEncrypted': (None, 'server_encrypted', _bool),
'Content-Type': ('content_settings', 'content_type', _to_str),
'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
'Content-Language': ('content_settings', 'content_language', _to_str),
'Content-MD5': ('content_settings', 'content_md5', _to_str),
'Cache-Control': ('content_settings', 'cache_control', _to_str),
'LeaseStatus': ('lease', 'status', _to_str),
'LeaseState': ('lease', 'state', _to_str),
'LeaseDuration': ('lease', 'duration', _to_str),
'CopyId': ('copy', 'id', _to_str),
'CopySource': ('copy', 'source', _to_str),
'CopyStatus': ('copy', 'status', _to_str),
'CopyProgress': ('copy', 'progress', _to_str),
'CopyCompletionTime': ('copy', 'completion_time', _to_str),
'CopyStatusDescription': ('copy', 'status_description', _to_str),
'AccessTier': (None, 'blob_tier', _to_str),
'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
'ArchiveStatus': (None, 'rehydration_status', _to_str),
'DeletedTime': (None, 'deleted_time', parser.parse),
'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int),
'Creation-Time': (None, 'creation_time', parser.parse),
}
def _convert_xml_to_blob_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Delimiter>string-value</Delimiter>
<Blobs>
<Blob>
<Name>blob-name</Name>
<Deleted>true</Deleted>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date-time-value</Last-Modified>
<Etag>etag</Etag>
<Content-Length>size-in-bytes</Content-Length>
<Content-Type>blob-content-type</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
<BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
<LeaseStatus>locked|unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<CopyId>id</CopyId>
<CopyStatus>pending | success | aborted | failed </CopyStatus>
<CopySource>source url</CopySource>
<CopyProgress>bytes copied/bytes total</CopyProgress>
<CopyCompletionTime>datetime</CopyCompletionTime>
<CopyStatusDescription>error string</CopyStatusDescription>
<AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
<AccessTierChangeTime>date-time-value</AccessTierChangeTime>
<AccessTierInferred>true</AccessTierInferred>
<DeletedTime>datetime</DeletedTime>
<RemainingRetentionDays>int</RemainingRetentionDays>
<Creation-Time>date-time-value</Creation-Time>
</Properties>
<Metadata>
<Name>value</Name>
</Metadata>
</Blob>
<BlobPrefix>
<Name>blob-prefix</Name>
</BlobPrefix>
</Blobs>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
blob_list = _list()
list_element = ETree.fromstring(response.body)
setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
blobs_element = list_element.find('Blobs')
blob_prefix_elements = blobs_element.findall('BlobPrefix')
if blob_prefix_elements is not None:
for blob_prefix_element in blob_prefix_elements:
prefix = BlobPrefix()
prefix.name = blob_prefix_element.findtext('Name')
blob_list.append(prefix)
for blob_element in blobs_element.findall('Blob'):
blob = Blob()
blob.name = blob_element.findtext('Name')
blob.snapshot = blob_element.findtext('Snapshot')
deleted = blob_element.findtext('Deleted')
if deleted:
blob.deleted = _bool(deleted)
# Properties
properties_element = blob_element.find('Properties')
if properties_element is not None:
for property_element in properties_element:
info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
if info is None:
setattr(blob.properties, property_element.tag, _to_str(property_element.text))
elif info[0] is None:
setattr(blob.properties, info[1], info[2](property_element.text))
else:
attr = getattr(blob.properties, info[0])
setattr(attr, info[1], info[2](property_element.text))
# Metadata
metadata_root_element = blob_element.find('Metadata')
if metadata_root_element is not None:
blob.metadata = dict()
for metadata_element in metadata_root_element:
blob.metadata[metadata_element.tag] = metadata_element.text
# Add blob to list
blob_list.append(blob)
return blob_list
def _convert_xml_to_blob_name_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Delimiter>string-value</Delimiter>
<Blobs>
<Blob>
<Name>blob-name</Name>
<Deleted>true</Deleted>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date-time-value</Last-Modified>
<Etag>etag</Etag>
<Content-Length>size-in-bytes</Content-Length>
<Content-Type>blob-content-type</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
<BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
<LeaseStatus>locked|unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<CopyId>id</CopyId>
<CopyStatus>pending | success | aborted | failed </CopyStatus>
<CopySource>source url</CopySource>
<CopyProgress>bytes copied/bytes total</CopyProgress>
<CopyCompletionTime>datetime</CopyCompletionTime>
<CopyStatusDescription>error string</CopyStatusDescription>
<AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
<AccessTierChangeTime>date-time-value</AccessTierChangeTime>
<AccessTierInferred>true</AccessTierInferred>
<DeletedTime>datetime</DeletedTime>
<RemainingRetentionDays>int</RemainingRetentionDays>
<Creation-Time>date-time-value</Creation-Time>
</Properties>
<Metadata>
<Name>value</Name>
</Metadata>
</Blob>
<BlobPrefix>
<Name>blob-prefix</Name>
</BlobPrefix>
</Blobs>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
blob_list = _list()
list_element = ETree.fromstring(response.body)
setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
blobs_element = list_element.find('Blobs')
blob_prefix_elements = blobs_element.findall('BlobPrefix')
if blob_prefix_elements is not None:
for blob_prefix_element in blob_prefix_elements:
blob_list.append(blob_prefix_element.findtext('Name'))
for blob_element in blobs_element.findall('Blob'):
blob_list.append(blob_element.findtext('Name'))
return blob_list
def _convert_xml_to_block_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<BlockList>
<CommittedBlocks>
<Block>
<Name>base64-encoded-block-id</Name>
<Size>size-in-bytes</Size>
</Block>
</CommittedBlocks>
<UncommittedBlocks>
<Block>
<Name>base64-encoded-block-id</Name>
<Size>size-in-bytes</Size>
</Block>
</UncommittedBlocks>
</BlockList>
Converts xml response to block list class.
'''
if response is None or response.body is None:
return None
block_list = BlobBlockList()
list_element = ETree.fromstring(response.body)
committed_blocks_element = list_element.find('CommittedBlocks')
if committed_blocks_element is not None:
for block_element in committed_blocks_element.findall('Block'):
block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
block_size = int(block_element.findtext('Size'))
block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
block._set_size(block_size)
block_list.committed_blocks.append(block)
uncommitted_blocks_element = list_element.find('UncommittedBlocks')
if uncommitted_blocks_element is not None:
for block_element in uncommitted_blocks_element.findall('Block'):
block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
block_size = int(block_element.findtext('Size'))
block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
block._set_size(block_size)
block_list.uncommitted_blocks.append(block)
return block_list
def _convert_xml_to_page_ranges(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<PageList>
<PageRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</PageRange>
<ClearRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</ClearRange>
<PageRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</PageRange>
</PageList>
'''
if response is None or response.body is None:
return None
page_list = list()
list_element = ETree.fromstring(response.body)
for page_range_element in list_element:
if page_range_element.tag == 'PageRange':
is_cleared = False
elif page_range_element.tag == 'ClearRange':
is_cleared = True
else:
continue  # skip any unrecognized page range types
page_list.append(
PageRange(
int(page_range_element.findtext('Start')),
int(page_range_element.findtext('End')),
is_cleared
)
)
return page_list
def _parse_account_information(response):
account_info = AccountInformation()
account_info.sku_name = response.headers['x-ms-sku-name']
account_info.account_kind = response.headers['x-ms-account-kind']
return account_info
def _convert_xml_to_user_delegation_key(response):
"""
<?xml version="1.0" encoding="utf-8"?>
<UserDelegationKey>
<SignedOid> Guid </SignedOid>
<SignedTid> Guid </SignedTid>
<SignedStart> String, formatted ISO Date </SignedStart>
<SignedExpiry> String, formatted ISO Date </SignedExpiry>
<SignedService>b</SignedService>
<SignedVersion> String, rest api version used to create delegation key </SignedVersion>
<Value>Ovg+o0K/0/2V8upg7AwlyAPCriEcOSXKuBu2Gv/PU70Y7aWDW3C2ZRmw6kYWqPWBaM1GosLkcSZkgsobAlT+Sw==</Value>
</UserDelegationKey>
Converts xml response to UserDelegationKey class.
"""
if response is None or response.body is None:
return None
delegation_key = UserDelegationKey()
key_element = ETree.fromstring(response.body)
delegation_key.signed_oid = key_element.findtext('SignedOid')
delegation_key.signed_tid = key_element.findtext('SignedTid')
delegation_key.signed_start = key_element.findtext('SignedStart')
delegation_key.signed_expiry = key_element.findtext('SignedExpiry')
delegation_key.signed_service = key_element.findtext('SignedService')
delegation_key.signed_version = key_element.findtext('SignedVersion')
delegation_key.value = key_element.findtext('Value')
return delegation_key
def _ingest_batch_response(batch_response, batch_sub_requests):
"""
Takes the response to a batch request and parses the response into the separate responses.
:param :class:`~azure.storage.common._http.HTTPResponse` batch_response:
The response of the HTTP batch request generated by this object.
:return: sub-responses parsed from batch HTTP response
:rtype: list of :class:`~azure.storage.common._http.HTTPResponse`
"""
parsed_batch_sub_response_list = []
# header value format: `multipart/mixed; boundary=<delimiter>`
response_delimiter = batch_response.headers.get('content-type').split("=")[1]
response_body = batch_response.body.decode('utf-8')
# split byte[] on the "substring" "--<delim>\r\n"
sub_response_list = response_body.split("--" + response_delimiter + _HTTP_LINE_ENDING)
# strip final, slightly different delim "\r\n--<delim>--" off last entry
sub_response_list[len(sub_response_list) - 1] = \
sub_response_list[len(sub_response_list) - 1].split(_HTTP_LINE_ENDING + "--" + response_delimiter + "--")[0]
for sub_response in sub_response_list:
if len(sub_response) != 0:
http_response = _parse_sub_response_to_http_response(sub_response)
is_successful = 200 <= http_response.status < 300
index_of_sub_request = _to_int(http_response.headers.get('Content-ID'))
batch_sub_request = batch_sub_requests[index_of_sub_request]
parsed_batch_sub_response_list.append(BatchSubResponse(is_successful, http_response, batch_sub_request))
return parsed_batch_sub_response_list
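# Illustrative sketch (not part of the original module): the delimiter-splitting step used by
# _ingest_batch_response above, reduced to a standalone helper. The delimiter value and body
# text passed to it are assumptions for the example.
def _example_split_batch_body(body_text, delimiter):
    line_ending = '\r\n'
    # split on "--<delim>\r\n", then strip the closing "\r\n--<delim>--" off the last part
    parts = body_text.split('--' + delimiter + line_ending)
    parts[-1] = parts[-1].split(line_ending + '--' + delimiter + '--')[0]
    return [part for part in parts if part]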
def _parse_sub_response_to_http_response(sub_response):
"""
Header: Value (1 or more times)
HTTP/<version> <statusCode> <statusName>
Header: Value (1 or more times)
body (if any)
:param sub_response:
The raw bytes of this sub-response.
:return: An HttpResponse object.
"""
empty_line = _HTTP_LINE_ENDING.encode('utf-8')
num_empty_lines = 0
batch_http_sub_response = HTTPResponse(None, '', dict(), b'')
try:
body_stream = BytesIO()
body_stream.write(sub_response.encode('utf-8'))
body_stream.seek(0)
while True:
line = body_stream.readline()
if line == b'':
return batch_http_sub_response
if line.startswith("HTTP".encode('utf-8')):
batch_http_sub_response.status = _to_int(line.decode('utf-8').split(" ")[1])
elif line == empty_line:
num_empty_lines += 1
elif line.startswith("x-ms-error-code".encode('utf-8')):
batch_http_sub_response.message = line.decode('utf-8').split(": ")[1].rstrip()
elif num_empty_lines == 2:
batch_http_sub_response.body += line
else:
header = line.decode('utf-8').split(": ")[0]
value = line.decode('utf-8').split(": ")[1].rstrip()
batch_http_sub_response.headers[header] = value
finally:
body_stream.close()
return batch_http_sub_response
def _parse_continuation_token(response):
marker = response.headers.get('x-ms-continuation')
return marker if marker != '' else None
def _parse_path_permission_and_acl(response):
props = PathProperties()
props.owner = response.headers.get('x-ms-owner')
props.group = response.headers.get('x-ms-group')
props.permissions = response.headers.get('x-ms-permissions')
props.acl = response.headers.get('x-ms-acl')
return props


@ -1,181 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import threading
def _download_blob_chunks(blob_service, container_name, blob_name, snapshot,
download_size, block_size, progress, start_range, end_range,
stream, max_connections, progress_callback, validate_content,
lease_id, if_modified_since, if_unmodified_since, if_match,
if_none_match, timeout, operation_context, cpk):
downloader_class = _ParallelBlobChunkDownloader if max_connections > 1 else _SequentialBlobChunkDownloader
downloader = downloader_class(
blob_service,
container_name,
blob_name,
snapshot,
download_size,
block_size,
progress,
start_range,
end_range,
stream,
progress_callback,
validate_content,
lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout,
operation_context,
cpk,
)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
else:
for chunk in downloader.get_chunk_offsets():
downloader.process_chunk(chunk)
class _BlobChunkDownloader(object):
def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
chunk_size, progress, start_range, end_range, stream,
progress_callback, validate_content, lease_id, if_modified_since,
if_unmodified_since, if_match, if_none_match, timeout, operation_context, cpk):
# identifiers for the blob
self.blob_service = blob_service
self.container_name = container_name
self.blob_name = blob_name
self.snapshot = snapshot
# information on the download range/chunk size
self.chunk_size = chunk_size
self.download_size = download_size
self.start_index = start_range
self.blob_end = end_range
# the destination that we will write to
self.stream = stream
# progress related
self.progress_callback = progress_callback
self.progress_total = progress
# parameters for each get blob operation
self.timeout = timeout
self.operation_context = operation_context
self.validate_content = validate_content
self.lease_id = lease_id
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
self.if_match = if_match
self.if_none_match = if_none_match
self.cpk = cpk
def get_chunk_offsets(self):
index = self.start_index
while index < self.blob_end:
yield index
index += self.chunk_size
def process_chunk(self, chunk_start):
if chunk_start + self.chunk_size > self.blob_end:
chunk_end = self.blob_end
else:
chunk_end = chunk_start + self.chunk_size
chunk_data = self._download_chunk(chunk_start, chunk_end).content
length = chunk_end - chunk_start
if length > 0:
self._write_to_stream(chunk_data, chunk_start)
self._update_progress(length)
# should be provided by the subclass
def _update_progress(self, length):
pass
# should be provided by the subclass
def _write_to_stream(self, chunk_data, chunk_start):
pass
def _download_chunk(self, chunk_start, chunk_end):
response = self.blob_service._get_blob(
self.container_name,
self.blob_name,
snapshot=self.snapshot,
start_range=chunk_start,
end_range=chunk_end - 1,
validate_content=self.validate_content,
lease_id=self.lease_id,
if_modified_since=self.if_modified_since,
if_unmodified_since=self.if_unmodified_since,
if_match=self.if_match,
if_none_match=self.if_none_match,
timeout=self.timeout,
_context=self.operation_context,
cpk=self.cpk,
)
# This makes sure that if_match is set so that we can validate
# that subsequent downloads are to an unmodified blob
self.if_match = response.properties.etag
return response
class _ParallelBlobChunkDownloader(_BlobChunkDownloader):
def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
chunk_size, progress, start_range, end_range, stream,
progress_callback, validate_content, lease_id, if_modified_since,
if_unmodified_since, if_match, if_none_match, timeout, operation_context, cpk):
super(_ParallelBlobChunkDownloader, self).__init__(blob_service, container_name, blob_name, snapshot,
download_size,
chunk_size, progress, start_range, end_range, stream,
progress_callback, validate_content, lease_id,
if_modified_since,
if_unmodified_since, if_match, if_none_match, timeout,
operation_context, cpk)
# for a parallel download, the stream is always seekable, so we note down the current position
# in order to seek to the right place when out-of-order chunks come in
self.stream_start = stream.tell()
# since parallel operations are going on
# it is essential to protect the writing and progress reporting operations
self.stream_lock = threading.Lock()
self.progress_lock = threading.Lock()
def _update_progress(self, length):
if self.progress_callback is not None:
with self.progress_lock:
self.progress_total += length
total_so_far = self.progress_total
self.progress_callback(total_so_far, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
with self.stream_lock:
self.stream.seek(self.stream_start + (chunk_start - self.start_index))
self.stream.write(chunk_data)
class _SequentialBlobChunkDownloader(_BlobChunkDownloader):
def __init__(self, *args):
super(_SequentialBlobChunkDownloader, self).__init__(*args)
def _update_progress(self, length):
if self.progress_callback is not None:
self.progress_total += length
self.progress_callback(self.progress_total, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
# chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
self.stream.write(chunk_data)


@ -1,187 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from json import (
dumps,
loads,
)
from os import urandom
from cryptography.hazmat.primitives.padding import PKCS7
from ..common._encryption import (
_generate_encryption_data_dict,
_generate_AES_CBC_cipher,
_dict_to_encryption_data,
_validate_and_unwrap_cek,
_EncryptionAlgorithm,
)
from ..common._error import (
_validate_not_none,
_validate_key_encryption_key_wrap,
_ERROR_DATA_NOT_ENCRYPTED,
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
)
def _encrypt_blob(blob, key_encryption_key):
'''
Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
Returns a json-formatted string containing the encryption metadata. This method should
only be used when a blob is small enough for single shot upload. Encrypting larger blobs
is done as a part of the _upload_blob_chunks method.
:param bytes blob:
The blob to be encrypted.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
:return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
:rtype: (str, bytes)
'''
_validate_not_none('blob', blob)
_validate_not_none('key_encryption_key', key_encryption_key)
_validate_key_encryption_key_wrap(key_encryption_key)
# AES256 uses 256 bit (32 byte) keys and always uses 16 byte blocks
content_encryption_key = urandom(32)
initialization_vector = urandom(16)
cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
# PKCS7 with 16 byte blocks ensures compatibility with AES.
padder = PKCS7(128).padder()
padded_data = padder.update(blob) + padder.finalize()
# Encrypt the data.
encryptor = cipher.encryptor()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
initialization_vector)
encryption_data['EncryptionMode'] = 'FullBlob'
return dumps(encryption_data), encrypted_data
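# Illustrative sketch (not part of the original module): a minimal key-encryption-key object
# exposing the interface the docstrings above describe (wrap_key, get_key_wrap_algorithm,
# get_kid). The XOR "wrapping", key bytes and kid value are placeholders for the example only;
# a real kek would wrap the cek with RSA-OAEP, AES key wrap, or a key vault operation.
class _ExampleKeyEncryptionKey(object):
    def __init__(self, kek_bytes, kid='example-kid'):
        self._kek_bytes = kek_bytes
        self._kid = kid

    def wrap_key(self, key):
        # Placeholder wrap: XOR the content-encryption-key with the kek bytes (example only).
        return bytes(b ^ self._kek_bytes[i % len(self._kek_bytes)] for i, b in enumerate(key))

    def get_key_wrap_algorithm(self):
        return 'example-xor'

    def get_kid(self):
        return self._kid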
def _generate_blob_encryption_data(key_encryption_key):
'''
Generates the encryption_metadata for the blob.
:param object key_encryption_key:
The key-encryption-key used to wrap the cek associated with this blob.
:return: A tuple containing the cek and iv for this blob as well as the
serialized encryption metadata for the blob.
:rtype: (bytes, bytes, str)
'''
encryption_data = None
content_encryption_key = None
initialization_vector = None
if key_encryption_key:
_validate_key_encryption_key_wrap(key_encryption_key)
content_encryption_key = urandom(32)
initialization_vector = urandom(16)
encryption_data = _generate_encryption_data_dict(key_encryption_key,
content_encryption_key,
initialization_vector)
encryption_data['EncryptionMode'] = 'FullBlob'
encryption_data = dumps(encryption_data)
return content_encryption_key, initialization_vector, encryption_data
def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
response, start_offset, end_offset):
'''
Decrypts the given blob contents and returns only the requested range.
:param bool require_encryption:
Whether or not the calling blob service requires objects to be decrypted.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
:param key_resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key
implementing the interface defined above.
:return: The decrypted blob content.
:rtype: bytes
'''
_validate_not_none('response', response)
content = response.body
_validate_not_none('content', content)
try:
encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata']))
except:
if require_encryption:
raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
return content
if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
blob_type = response.headers['x-ms-blob-type']
iv = None
unpad = False
start_range, end_range = 0, len(content)
if 'content-range' in response.headers:
content_range = response.headers['content-range']
# Format: 'bytes x-y/size'
# Ignore the word 'bytes'
content_range = content_range.split(' ')
content_range = content_range[1].split('-')
start_range = int(content_range[0])
content_range = content_range[1].split('/')
end_range = int(content_range[0])
blob_size = int(content_range[1])
if start_offset >= 16:
iv = content[:16]
content = content[16:]
start_offset -= 16
else:
iv = encryption_data.content_encryption_IV
if end_range == blob_size - 1:
unpad = True
else:
unpad = True
iv = encryption_data.content_encryption_IV
if blob_type == 'PageBlob':
unpad = False
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
decryptor = cipher.decryptor()
content = decryptor.update(content) + decryptor.finalize()
if unpad:
unpadder = PKCS7(128).unpadder()
content = unpadder.update(content) + unpadder.finalize()
return content[start_offset: len(content) - end_offset]
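# Illustrative sketch (not part of the original module): the Content-Range parsing performed
# inside _decrypt_blob above, shown as a standalone helper. The sample header value in the
# comment is an assumption chosen for the example.
def _example_parse_content_range(content_range_header):
    # Format: 'bytes <start>-<end>/<blob size>', e.g. 'bytes 16-31/1024' -> (16, 31, 1024)
    _, byte_range = content_range_header.split(' ')
    start, rest = byte_range.split('-')
    end, blob_size = rest.split('/')
    return int(start), int(end), int(blob_size)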
def _get_blob_encryptor_and_padder(cek, iv, should_pad):
encryptor = None
padder = None
if cek is not None and iv is not None:
cipher = _generate_AES_CBC_cipher(cek, iv)
encryptor = cipher.encryptor()
padder = PKCS7(128).padder() if should_pad else None
return encryptor, padder


@ -1,29 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
'Invalid page blob size: {0}. ' + \
'The size must be aligned to a 512-byte boundary.'
_ERROR_PAGE_BLOB_START_ALIGNMENT = \
'start_range must align with 512 page size'
_ERROR_PAGE_BLOB_END_ALIGNMENT = \
'end_range must align with 512 page size'
_ERROR_INVALID_BLOCK_ID = \
'All blocks in block list need to have valid block ids.'
_ERROR_INVALID_LEASE_DURATION = \
"lease_duration param needs to be between 15 and 60 or -1."
_ERROR_INVALID_LEASE_BREAK_PERIOD = \
"lease_break_period param needs to be between 0 and 60."
_ERROR_NO_SINGLE_THREAD_CHUNKING = \
'To use blob chunk downloader more than 1 thread must be ' + \
'used since get_blob_to_bytes should be called for single threaded ' + \
'blob downloads.'


@ -1,313 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from xml.sax.saxutils import escape as xml_escape
from datetime import date
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from ..common._common_conversion import (
_encode_base64,
_str,
)
from ..common._serialization import (
_to_utc_datetime,
)
from ..common._error import (
_validate_not_none,
_ERROR_START_END_NEEDED_FOR_MD5,
_ERROR_RANGE_TOO_LARGE_FOR_MD5,
)
from ._error import (
_ERROR_PAGE_BLOB_START_ALIGNMENT,
_ERROR_PAGE_BLOB_END_ALIGNMENT,
_ERROR_INVALID_BLOCK_ID,
)
from io import BytesIO
_REQUEST_DELIMITER_PREFIX = "batch_"
_HTTP1_1_IDENTIFIER = "HTTP/1.1"
_HTTP_LINE_ENDING = "\r\n"
def _get_path(container_name=None, blob_name=None):
'''
Creates the path to access a blob resource.
container_name:
Name of container.
blob_name:
The path to the blob.
'''
if container_name and blob_name:
return '/{0}/{1}'.format(
_str(container_name),
_str(blob_name))
elif container_name:
return '/{0}'.format(_str(container_name))
else:
return '/'
def _validate_and_add_cpk_headers(request, encryption_key, protocol):
if encryption_key is None:
return
if protocol.lower() != 'https':
raise ValueError("Customer provided encryption key must be used over HTTPS.")
request.headers['x-ms-encryption-key'] = encryption_key.key_value
request.headers['x-ms-encryption-key-sha256'] = encryption_key.key_hash
request.headers['x-ms-encryption-algorithm'] = encryption_key.algorithm
def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
end_range_required=True, check_content_md5=False, align_to_page=False,
range_header_name='x-ms-range'):
# If end range is provided, start range must be provided
if start_range_required or end_range is not None:
_validate_not_none('start_range', start_range)
if end_range_required:
_validate_not_none('end_range', end_range)
# Page ranges must be 512 aligned
if align_to_page:
if start_range is not None and start_range % 512 != 0:
raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
if end_range is not None and end_range % 512 != 511:
raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)
# Format based on whether end_range is present
request.headers = request.headers or {}
if end_range is not None:
request.headers[range_header_name] = 'bytes={0}-{1}'.format(start_range, end_range)
elif start_range is not None:
request.headers[range_header_name] = "bytes={0}-".format(start_range)
# Content MD5 can only be provided for a complete range less than 4MB in size
if check_content_md5:
if start_range is None or end_range is None:
raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
if end_range - start_range > 4 * 1024 * 1024:
raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
request.headers['x-ms-range-get-content-md5'] = 'true'
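# Illustrative sketch (not part of the original module): the header value produced by the
# formatting branch of _validate_and_format_range_headers above, reproduced standalone. The
# sample ranges shown in the comments are assumptions for the example.
def _example_format_range_header(start_range, end_range):
    # _example_format_range_header(0, 511)     -> 'bytes=0-511'
    # _example_format_range_header(1024, None) -> 'bytes=1024-'
    if end_range is not None:
        return 'bytes={0}-{1}'.format(start_range, end_range)
    if start_range is not None:
        return 'bytes={0}-'.format(start_range)
    return None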
def _convert_block_list_to_xml(block_id_list):
'''
<?xml version="1.0" encoding="utf-8"?>
<BlockList>
<Committed>first-base64-encoded-block-id</Committed>
<Uncommitted>second-base64-encoded-block-id</Uncommitted>
<Latest>third-base64-encoded-block-id</Latest>
</BlockList>
Convert a block list to xml to send.
block_id_list:
A list of BlobBlock containing the block ids and block state that are used in put_block_list.
Only get block from latest blocks.
'''
if block_id_list is None:
return ''
block_list_element = ETree.Element('BlockList')
# Enabled
for block in block_id_list:
if block.id is None:
raise ValueError(_ERROR_INVALID_BLOCK_ID)
block_id = xml_escape(_str(format(_encode_base64(block.id))))
ETree.SubElement(block_list_element, block.state).text = block_id
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
finally:
output = stream.getvalue()
stream.close()
# return xml value
return output
def _convert_delegation_key_info_to_xml(start_time, expiry_time):
"""
<?xml version="1.0" encoding="utf-8"?>
<KeyInfo>
<Start> String, formatted ISO Date </Start>
<Expiry> String, formatted ISO Date </Expiry>
</KeyInfo>
Convert key info to xml to send.
"""
if start_time is None or expiry_time is None:
raise ValueError("delegation key start/end times are required")
key_info_element = ETree.Element('KeyInfo')
ETree.SubElement(key_info_element, 'Start').text = \
_to_utc_datetime(start_time) if isinstance(start_time, date) else start_time
ETree.SubElement(key_info_element, 'Expiry').text = \
_to_utc_datetime(expiry_time) if isinstance(expiry_time, date) else expiry_time
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(key_info_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
finally:
output = stream.getvalue()
stream.close()
# return xml value
return output
def _serialize_batch_body(requests, batch_id):
"""
--<delimiter>
<subrequest>
--<delimiter>
<subrequest> (repeated as needed)
--<delimiter>--
Serializes the requests in this batch to a single HTTP mixed/multipart body.
:param list(class:`~..common._http.HTTPRequest`) requests:
a list of sub-request for the batch request
:param str batch_id:
to be embedded in batch sub-request delimiter
:return: The body bytes for this batch.
"""
if requests is None or len(requests) == 0:
raise ValueError('Please provide sub-request(s) for this batch request')
delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
batch_body = list()
for request in requests:
batch_body.append(delimiter_bytes)
batch_body.append(_make_body_from_sub_request(request))
batch_body.append(newline_bytes)
batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
# final line of body MUST have \r\n at the end, or it will not be properly read by the service
batch_body.append(newline_bytes)
return bytes().join(batch_body)
def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
"""
Gets the delimiter used for this batch request's mixed/multipart HTTP format.
:param batch_id Randomly generated id
:param is_prepend_dashes Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
:param is_append_dashes Whether to include the ending dashes. Used in the body on the closing delimiter only.
:return: The delimiter, WITHOUT a trailing newline.
"""
prepend_dashes = '--' if is_prepend_dashes else ''
append_dashes = '--' if is_append_dashes else ''
return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
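# Illustrative usage sketch (not part of the original module): sample delimiter values for an
# assumed batch id of 'abc123'.
def _example_batch_delimiters(batch_id):
    # e.g. 'abc123' -> ('batch_abc123', '--batch_abc123', '--batch_abc123--')
    return (_get_batch_request_delimiter(batch_id),
            _get_batch_request_delimiter(batch_id, True, False),
            _get_batch_request_delimiter(batch_id, True, True))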
def _make_body_from_sub_request(sub_request):
"""
Content-Type: application/http
Content-ID: <sequential int ID>
Content-Transfer-Encoding: <value> (if present)
<verb> <path><query> HTTP/<version>
<header key>: <header value> (repeated as necessary)
Content-Length: <value>
(newline if content length > 0)
<body> (if content length > 0)
Serializes an http request.
:param :class:`~..common._http.HTTPRequest` sub_request Request to serialize.
:return: The serialized sub-request in bytes
"""
# put the sub-request's headers into a list for efficient str concatenation
sub_request_body = list()
# get headers for ease of manipulation; remove headers as they are used
headers = sub_request.headers
# append opening headers
sub_request_body.append("Content-Type: application/http")
sub_request_body.append(_HTTP_LINE_ENDING)
sub_request_body.append("Content-ID: ")
sub_request_body.append(headers.pop("Content-ID", ""))
sub_request_body.append(_HTTP_LINE_ENDING)
sub_request_body.append("Content-Transfer-Encoding: ")
sub_request_body.append(headers.pop("Content-Transfer-Encoding", ""))
sub_request_body.append(_HTTP_LINE_ENDING)
# append blank line
sub_request_body.append(_HTTP_LINE_ENDING)
# append HTTP verb and path and query and HTTP version
sub_request_body.append(sub_request.method)
sub_request_body.append(' ')
sub_request_body.append(sub_request.path)
sub_request_body.append("" if sub_request.query is None else '?' + _serialize_query(sub_request.query))
sub_request_body.append(' ')
sub_request_body.append(_HTTP1_1_IDENTIFIER)
sub_request_body.append(_HTTP_LINE_ENDING)
# append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
for header_name, header_value in headers.items():
if header_value is not None:
sub_request_body.append(header_name)
sub_request_body.append(": ")
sub_request_body.append(header_value)
sub_request_body.append(_HTTP_LINE_ENDING)
# finished if no body
if sub_request.body is None:
return ''.join(sub_request_body).encode('utf-8')
# append blank line
sub_request_body.append(_HTTP_LINE_ENDING)
sub_request_body.append(sub_request.body)
return ''.join(sub_request_body).encode('utf-8')
def _serialize_query(query):
serialized_query = []
for query_key, query_value in query.items():
if query_value is not None:
serialized_query.append(query_key)
serialized_query.append("=")
serialized_query.append(query_value)
serialized_query.append("&")
if len(serialized_query) != 0:
del serialized_query[-1]
return ''.join(serialized_query)
# TODO to be removed after service update
def _add_file_or_directory_properties_header(properties_dict, request):
if properties_dict:
if not request.headers:
request.headers = {}
request.headers['x-ms-properties'] = \
",".join(["{}={}".format(str(name), _encode_base64(value)) for name, value in properties_dict.items()])


@ -1,502 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from math import ceil
from ..common._common_conversion import _encode_base64
from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
from ..common._serialization import (
url_quote,
_get_data_bytes_only,
_len_plus
)
from ._deserialization import _parse_base_properties
from ._constants import (
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
from ._encryption import (
_get_blob_encryptor_and_padder,
)
from .models import BlobBlock
def _upload_blob_chunks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
progress_callback, validate_content, lease_id, uploader_class,
maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None,
content_encryption_key=None, initialization_vector=None, resource_properties=None):
encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
uploader_class is not _PageBlobChunkUploader)
uploader = uploader_class(
blob_service,
container_name,
blob_name,
blob_size,
block_size,
stream,
max_connections > 1,
progress_callback,
validate_content,
lease_id,
timeout,
encryptor,
padder,
cpk,
)
uploader.maxsize_condition = maxsize_condition
# Access conditions do not work with parallelism
if max_connections > 1:
uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None
else:
uploader.if_match = if_match
uploader.if_none_match = if_none_match
uploader.if_modified_since = if_modified_since
uploader.if_unmodified_since = if_unmodified_since
if progress_callback is not None:
progress_callback(0, blob_size)
if max_connections > 1:
import concurrent.futures
from threading import BoundedSemaphore
'''
Ensure we bound the chunking so that we only buffer and submit 'max_connections' worth of work items to the executor.
This is necessary because the executor queue would otherwise keep accepting submitted work items, buffering every block in memory.
Using max_connections + 1 ensures the next chunk is already buffered and ready for when a worker thread becomes available.
'''
chunk_throttler = BoundedSemaphore(max_connections + 1)
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
futures = []
running_futures = []
# Check for exceptions and fail fast.
for chunk in uploader.get_chunk_streams():
for f in running_futures:
if f.done():
if f.exception():
raise f.exception()
else:
running_futures.remove(f)
chunk_throttler.acquire()
future = executor.submit(uploader.process_chunk, chunk)
# Calls callback upon completion (even if the callback was added after the Future task is done).
future.add_done_callback(lambda x: chunk_throttler.release())
futures.append(future)
running_futures.append(future)
# result() will wait until completion and also raise any exceptions that may have been set.
range_ids = [f.result() for f in futures]
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if resource_properties and uploader.response_properties is not None:
resource_properties.clone(uploader.response_properties)
return range_ids
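# Illustrative sketch (not part of the original module): the bounded-semaphore throttling
# pattern used above, reduced to its essentials. The worker callable and chunk iterable are
# placeholders for the example.
def _example_throttled_map(worker, chunks, max_connections):
    import concurrent.futures
    from threading import BoundedSemaphore
    # Allow at most max_connections + 1 chunks to be buffered/submitted at once so the
    # executor queue cannot grow without bound, while keeping one extra chunk ready for the
    # next free worker thread.
    throttler = BoundedSemaphore(max_connections + 1)
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
        for chunk in chunks:
            throttler.acquire()
            future = executor.submit(worker, chunk)
            future.add_done_callback(lambda _done: throttler.release())
            futures.append(future)
        return [f.result() for f in futures]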
def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
progress_callback, validate_content, lease_id, uploader_class,
maxsize_condition=None, if_match=None, timeout=None, cpk=None):
uploader = uploader_class(
blob_service,
container_name,
blob_name,
blob_size,
block_size,
stream,
max_connections > 1,
progress_callback,
validate_content,
lease_id,
timeout,
None,
None,
cpk,
)
uploader.maxsize_condition = maxsize_condition
# ETag matching does not work with parallelism as a ranged upload may start
# before the previous finishes and provides an etag
uploader.if_match = if_match if not max_connections > 1 else None
if progress_callback is not None:
progress_callback(0, blob_size)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
else:
range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
return range_ids
class _BlobChunkUploader(object):
def __init__(self, blob_service, container_name, blob_name, blob_size,
chunk_size, stream, parallel, progress_callback,
validate_content, lease_id, timeout, encryptor, padder, cpk):
self.blob_service = blob_service
self.container_name = container_name
self.blob_name = blob_name
self.blob_size = blob_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
self.progress_callback = progress_callback
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.validate_content = validate_content
self.lease_id = lease_id
self.timeout = timeout
self.encryptor = encryptor
self.padder = padder
self.response_properties = None
self.cpk = cpk
def get_chunk_streams(self):
index = 0
while True:
data = b''
read_size = self.chunk_size
# Buffer until we either reach the end of the stream or get a whole chunk.
while True:
if self.blob_size:
read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
temp = self.stream.read(read_size)
temp = _get_data_bytes_only('temp', temp)
data += temp
# We have read an empty string and so are at the end
# of the buffer or we have read a full chunk.
if temp == b'' or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if len(data) > 0:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_callback is not None:
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
total = self.progress_total
else:
self.progress_total += length
total = self.progress_total
self.progress_callback(total, self.blob_size)
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.blob_size
if blob_length is None:
blob_length = _len_plus(self.stream)
if blob_length is None:
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
yield ('BlockId{}'.format("%05d" % i),
_SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
lock))
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block_with_progress(self, block_id, block_stream):
range_id = self._upload_substream_block(block_id, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.response_properties = resp
class _BlockBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
self.blob_service._put_block(
self.container_name,
self.blob_name,
chunk_data,
block_id,
validate_content=self.validate_content,
lease_id=self.lease_id,
timeout=self.timeout,
cpk=self.cpk,
)
return BlobBlock(block_id)
def _upload_substream_block(self, block_id, block_stream):
try:
self.blob_service._put_block(
self.container_name,
self.blob_name,
block_stream,
block_id,
validate_content=self.validate_content,
lease_id=self.lease_id,
timeout=self.timeout,
cpk=self.cpk,
)
finally:
block_stream.close()
return BlobBlock(block_id)
class _PageBlobChunkUploader(_BlobChunkUploader):
def _is_chunk_empty(self, chunk_data):
# read until non-zero byte is encountered
# if reached the end without returning, then chunk_data is all 0's
for each_byte in chunk_data:
if each_byte != 0 and each_byte != b'\x00':
return False
return True
def _upload_chunk(self, chunk_start, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_start + len(chunk_data) - 1
resp = self.blob_service._update_page(
self.container_name,
self.blob_name,
chunk_data,
chunk_start,
chunk_end,
validate_content=self.validate_content,
lease_id=self.lease_id,
if_match=self.if_match,
timeout=self.timeout,
cpk=self.cpk,
)
if not self.parallel:
self.if_match = resp.etag
self.set_response_properties(resp)
class _AppendBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
if not hasattr(self, 'current_length'):
resp = self.blob_service.append_block(
self.container_name,
self.blob_name,
chunk_data,
validate_content=self.validate_content,
lease_id=self.lease_id,
maxsize_condition=self.maxsize_condition,
timeout=self.timeout,
if_modified_since=self.if_modified_since,
if_unmodified_since=self.if_unmodified_since,
if_match=self.if_match,
if_none_match=self.if_none_match,
cpk=self.cpk,
)
self.current_length = resp.append_offset
else:
resp = self.blob_service.append_block(
self.container_name,
self.blob_name,
chunk_data,
validate_content=self.validate_content,
lease_id=self.lease_id,
maxsize_condition=self.maxsize_condition,
appendpos_condition=self.current_length + chunk_offset,
timeout=self.timeout,
cpk=self.cpk,
)
self.set_response_properties(resp)
class _SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
# derivations of io.IOBase and thus do not implement seekable().
# Python > 3.0: file-like objects created with open() are derived from io.IOBase.
try:
# only the main thread runs this, so there's no need to grab the lock
wrapped_stream.seek(0, SEEK_CUR)
except:
raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
# we must avoid buffering more than necessary, and also not use up too much memory
# so the max buffer size is capped at 4MB
self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \
else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
self._current_buffer_start = 0
self._current_buffer_size = 0
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, n):
if self.closed:
raise ValueError("Stream is closed.")
# adjust if out of bounds
if n + self._position >= self._length:
n = self._length - self._position
# return fast
if n == 0 or self._buffer.closed:
return b''
# attempt first read from the read buffer and update position
read_buffer = self._buffer.read(n)
bytes_read = len(read_buffer)
bytes_remaining = n - bytes_read
self._position += bytes_read
# repopulate the read buffer from the underlying stream to fulfill the request
# ensure the seek and read operations are done atomically (only if a lock is provided)
if bytes_remaining > 0:
with self._buffer:
# either read in the max buffer size specified on the class
# or read in just enough data for the current block/sub stream
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
# lock is only defined if max_connections > 1 (parallel uploads)
if self._lock:
with self._lock:
# reposition the underlying stream to match the start of the data to read
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
# If we can't seek to the right location, our read will be corrupted so fail fast.
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
# update the buffer with new data from the wrapped stream
# we need to note down the start position and size of the buffer, in case seek is performed later
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
# read the remaining bytes from the new buffer and update position
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence == SEEK_SET:
start_index = 0
elif whence == SEEK_CUR:
start_index = self._position
elif whence == SEEK_END:
start_index = self._length
offset = -offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
# check if buffer is still valid
# if not, drop buffer
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
else: # if yes seek to correct position
delta = pos - self._current_buffer_start
self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self):
raise UnsupportedOperation
def writelines(self):
raise UnsupportedOperation
def writable(self):
return False
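# Illustrative sketch (not part of the original module): the partitioning arithmetic that
# get_substream_blocks uses when handing _SubStream views to the uploaders above. The 10 MB
# blob length and 4 MB chunk size in the comment are assumptions for the example.
def _example_substream_layout(blob_length, chunk_size):
    from math import ceil
    # equal-sized chunks, with a shorter final chunk when the length is not an exact multiple
    blocks = int(ceil(blob_length / float(chunk_size)))
    last_block_size = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
    return [(i * chunk_size, last_block_size if i == blocks - 1 else chunk_size)
            for i in range(blocks)]

# _example_substream_layout(10 * 1024 * 1024, 4 * 1024 * 1024)
#   -> [(0, 4194304), (4194304, 4194304), (8388608, 2097152)]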


@ -1,823 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
from os import path
from ..common._common_conversion import (
_to_str,
_int_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from ..common._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from ..common._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from ..common._http import HTTPRequest
from ..common._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from ._deserialization import (
_parse_append_block,
_parse_base_properties,
)
from ._serialization import (
_get_path,
_validate_and_format_range_headers,
_validate_and_add_cpk_headers,
)
from ._upload_chunking import (
_AppendBlobChunkUploader,
_upload_blob_chunks,
)
from .baseblobservice import BaseBlobService
from .models import (
_BlobTypes,
ResourceProperties
)
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
class AppendBlobService(BaseBlobService):
'''
An append blob is comprised of blocks and is optimized for append operations.
When you modify an append blob, blocks are added to the end of the blob only,
via the append_block operation. Updating or deleting of existing blocks is not
supported. Unlike a block blob, an append blob does not expose its block IDs.
Each block in an append blob can be a different size, up to a maximum of 4 MB,
and an append blob can include up to 50,000 blocks. The maximum size of an
append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks).
:ivar int MAX_BLOCK_SIZE:
The size of the blocks put by append_blob_from_* methods. Smaller blocks
may be put if there is less data provided. The maximum block size the service
supports is 4MB.
'''
MAX_BLOCK_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
connection_string=None, socket_timeout=None, token_credential=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
If neither account key nor sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
:param int socket_timeout:
If specified, this will override the default socket timeout. The timeout specified is in seconds.
See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
:param token_credential:
A token credential used to authenticate HTTPS requests. The token value
should be updated before its expiration.
:type token_credential: ~..common.TokenCredential
'''
self.blob_type = _BlobTypes.AppendBlob
super(AppendBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string, socket_timeout, token_credential)
def create_blob(self, container_name, blob_name, content_settings=None,
metadata=None, lease_id=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None, cpk=None):
'''
Creates a blob or overrides an existing blob. Use if_none_match=* to
prevent overriding an existing blob.
See create_blob_from_* for high level
functions that handle the creation and upload of large blobs with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to
perform the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {
'x-ms-blob-type': _to_str(self.blob_type),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
return self._perform_request(request, _parse_base_properties)
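# Illustrative sketch (not part of the original source): creating an empty append
# blob only if it does not already exist, via if_none_match='*'. The account,
# container, and blob names are hypothetical placeholders; ContentSettings is the
# models class shown later in this vendored SDK.
#
# service = AppendBlobService(account_name='myaccount', account_key='<key>')
# props = service.create_blob('logs', 'app.log',
#                             content_settings=ContentSettings(content_type='text/plain'),
#                             if_none_match='*')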
def append_block(self, container_name, blob_name, block,
validate_content=False, maxsize_condition=None,
appendpos_condition=None,
lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None):
'''
Commits a new block of data to the end of an existing append blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes block:
Content of the block in bytes.
:param bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the
AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
:return:
ETag, last modified, append offset, and committed block count
properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'appendblock',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
request.body = _get_data_bytes_only('block', block)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
return self._perform_request(request, _parse_append_block)
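# Illustrative sketch (not part of the original source): committing a single block
# to the end of an existing append blob while capping its total size. 'service'
# denotes an AppendBlobService instance as in the earlier sketch; names are
# hypothetical placeholders.
#
# result = service.append_block('logs', 'app.log', b'one line of data\n',
#                               maxsize_condition=4 * 1024 * 1024,
#                               validate_content=True)
# next_offset = result.append_offset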
def append_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start=None,
source_range_end=None, source_content_md5=None, source_if_modified_since=None,
source_if_unmodified_since=None, source_if_match=None,
source_if_none_match=None, maxsize_condition=None,
appendpos_condition=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None):
"""
Creates a new block to be committed as part of a blob, where the contents are read from a source url.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob.
:param str copy_source_url:
The URL of the source data. It can point to any Azure Blob or File that is either public or has a
shared access signature attached.
:param int source_range_start:
This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
:param int source_range_end:
This indicates the end of the range of bytes (inclusive) that has to be taken from the copy source.
:param str source_content_md5:
If given, the service will calculate the MD5 hash of the block content and compare against this value.
:param datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the source resource has been modified since the specified time.
:param datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the source resource has not been modified since the specified date/time.
:param str source_if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the source resource's ETag matches the value specified.
:param str source_if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the source resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the source resource does not exist, and fail the
operation if it does exist.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the
AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
"""
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('copy_source_url', copy_source_url)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'appendblock',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-copy-source': copy_source_url,
'x-ms-source-content-md5': source_content_md5,
'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since),
'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since),
'x-ms-source-if-Match': _to_str(source_if_match),
'x-ms-source-if-None-Match': _to_str(source_if_none_match),
'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
_validate_and_format_range_headers(request, source_range_start, source_range_end,
start_range_required=False,
end_range_required=False,
range_header_name="x-ms-source-range")
return self._perform_request(request, _parse_append_block)
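# Illustrative sketch (not part of the original source): appending the first 512
# bytes of a publicly readable (or SAS-protected) source blob. The source URL and
# names are hypothetical placeholders; 'service' is an AppendBlobService instance.
#
# service.append_block_from_url(
#     'logs', 'app.log',
#     copy_source_url='https://source.blob.core.windows.net/src/data.bin?<sas>',
#     source_range_start=0, source_range_end=511)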
# ----Convenience APIs----------------------------------------------
def append_blob_from_path(
self, container_name, blob_name, file_path, validate_content=False,
maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, cpk=None):
'''
Appends to the content of an existing blob from a file path, with automatic
chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
progress_callback=progress_callback,
lease_id=lease_id,
timeout=timeout,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
cpk=cpk)
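# Illustrative sketch (not part of the original source): appending a local file
# with a progress callback. The file path and names are hypothetical placeholders;
# 'service' is an AppendBlobService instance.
#
# def report(current, total):
#     print('appended {} of {} bytes'.format(current, total))
#
# service.append_blob_from_path('logs', 'app.log', '/tmp/app.log',
#                               progress_callback=report)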
def append_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, cpk=None):
'''
Appends to the content of an existing blob from an array of bytes, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as an array of bytes.
:param int index:
Start index in the array of bytes.
:param int count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if index < 0:
raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
cpk=cpk)
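# Illustrative sketch (not part of the original source): appending a slice of an
# in-memory byte buffer, starting at index 5 for 10 bytes. Names are hypothetical
# placeholders; 'service' is an AppendBlobService instance.
#
# data = b'header-bytes-payload'
# service.append_blob_from_bytes('logs', 'app.log', data, index=5, count=10)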
def append_blob_from_text(
self, container_name, blob_name, text, encoding='utf-8',
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, cpk=None):
'''
Appends to the content of an existing blob from str/unicode, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str text:
Text to upload to the blob.
:param str encoding:
Python encoding to use to convert the text to bytes.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if not isinstance(text, bytes):
_validate_not_none('encoding', encoding)
text = text.encode(encoding)
return self.append_blob_from_bytes(
container_name,
blob_name,
text,
index=0,
count=len(text),
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
cpk=cpk)
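# Illustrative sketch (not part of the original source): appending unicode text
# encoded as UTF-16 instead of the default UTF-8. Names are hypothetical
# placeholders; 'service' is an AppendBlobService instance.
#
# service.append_blob_from_text('logs', 'app.log', u'r\u00e9sum\u00e9 uploaded\n',
#                               encoding='utf-16')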
def append_blob_from_stream(
self, container_name, blob_name, stream, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, cpk=None):
'''
Appends to the content of an existing blob from a file/stream, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param io.IOBase stream:
Opened stream to upload as the blob content.
:param int count:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetime will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
# _upload_blob_chunks returns the block ids for block blobs, so resource_properties
# is passed as a parameter to capture the last_modified and etag for page and append blobs.
# This info is not needed for block blobs since _put_block_list, which is called afterwards, returns it.
resource_properties = ResourceProperties()
_upload_blob_chunks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_BLOCK_SIZE,
stream=stream,
max_connections=1, # upload not easily parallelizable
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_AppendBlobChunkUploader,
maxsize_condition=maxsize_condition,
timeout=timeout,
resource_properties=resource_properties,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
cpk=cpk,
)
return resource_properties
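# Illustrative sketch (not part of the original source): appending from an
# already-opened stream, supplying count for optimal chunking. The path and names
# are hypothetical placeholders; 'service' is an AppendBlobService instance.
#
# import os
# size = os.path.getsize('/tmp/app.log')
# with open('/tmp/app.log', 'rb') as data:
#     service.append_blob_from_stream('logs', 'app.log', data, count=size)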

Diff between the files is not shown because of its large size. Load diff

View file

@ -1,998 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..common._common_conversion import _to_str
class Container(object):
'''
Blob container class.
:ivar str name:
The name of the container.
:ivar metadata:
A dict containing name-value pairs associated with the container as metadata.
This var is set to None unless the include=metadata param was included
for the list containers operation. If this parameter was specified but the
container has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
:ivar ContainerProperties properties:
System properties for the container.
'''
def __init__(self, name=None, props=None, metadata=None):
self.name = name
self.properties = props or ContainerProperties()
self.metadata = metadata
class ContainerProperties(object):
'''
Blob container's properties class.
:ivar datetime last_modified:
A datetime object representing the last time the container was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar LeaseProperties lease:
Stores all the lease information for the container.
:ivar bool has_immutability_policy:
Represents whether the container has an immutability policy.
:ivar bool has_legal_hold:
Represents whether the container has a legal hold.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.lease = LeaseProperties()
self.public_access = None
self.has_immutability_policy = None
self.has_legal_hold = None
class Blob(object):
'''
Blob class.
:ivar str name:
Name of blob.
:ivar str snapshot:
A DateTime value that uniquely identifies the snapshot. The value of
this header indicates the snapshot version, and may be used in
subsequent requests to access the snapshot.
:ivar content:
Blob content.
:vartype content: str or bytes
:ivar BlobProperties properties:
Stores all the system properties for the blob.
:ivar metadata:
Name-value pairs associated with the blob as metadata.
:ivar bool deleted:
Specify whether the blob was soft deleted.
In other words, if the blob is being retained by the delete retention policy,
this field would be True. The blob could be undeleted or it will be garbage collected after the specified
time period.
'''
def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False):
self.name = name
self.snapshot = snapshot
self.content = content
self.properties = props or BlobProperties()
self.metadata = metadata
self.deleted = deleted
class BlobProperties(object):
'''
Blob Properties
:ivar str blob_type:
String indicating this blob's type.
:ivar datetime last_modified:
A datetime object representing the last time the blob was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int content_length:
The length of the content returned. If the entire blob was requested,
the length of blob in bytes. If a subset of the blob was requested, the
length of the returned subset.
:ivar str content_range:
Indicates the range of bytes returned in the event that the client
requested a subset of the blob.
:ivar int append_blob_committed_block_count:
(For Append Blobs) Number of committed blocks in the blob.
:ivar int page_blob_sequence_number:
(For Page Blobs) Sequence number for page blob used for coordinating
concurrent writes.
:ivar bool server_encrypted:
Set to true if the blob is encrypted on the server.
:ivar str encryption_key_sha256:
The server will echo the SHA256 of the customer-provided encryption key
to validate the key used in the operation.
:ivar ~azure.storage.blob.models.CopyProperties copy:
Stores all the copy properties for the blob.
:ivar ~azure.storage.blob.models.ContentSettings content_settings:
Stores all the content settings for the blob.
:ivar ~azure.storage.blob.models.LeaseProperties lease:
Stores all the lease information for the blob.
:ivar StandardBlobTier blob_tier:
Indicates the access tier of the blob. The hot tier is optimized
for storing data that is accessed frequently. The cool storage tier
is optimized for storing data that is infrequently accessed and stored
for at least a month. The archive tier is optimized for storing
data that is rarely accessed and stored for at least six months
with flexible latency requirements.
:ivar datetime blob_tier_change_time:
Indicates when the access tier was last changed.
:ivar bool blob_tier_inferred:
Indicates whether the access tier was inferred by the service.
If false, it indicates that the tier was set explicitly.
:ivar datetime deleted_time:
A datetime object representing the time at which the blob was deleted.
:ivar int remaining_retention_days:
The number of days that the blob will be retained before being permanently deleted by the service.
:ivar datetime creation_time:
Indicates when the blob was created, in UTC.
'''
def __init__(self):
self.blob_type = None
self.last_modified = None
self.etag = None
self.content_length = None
self.content_range = None
self.append_blob_committed_block_count = None
self.page_blob_sequence_number = None
self.server_encrypted = None
self.encryption_key_sha256 = None
self.copy = CopyProperties()
self.content_settings = ContentSettings()
self.lease = LeaseProperties()
self.blob_tier = None
self.blob_tier_change_time = None
self.blob_tier_inferred = False
self.deleted_time = None
self.remaining_retention_days = None
self.creation_time = None
class ContentSettings(object):
'''
Used to store the content settings of a blob.
:ivar str content_type:
The content type specified for the blob. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If the content_encoding has previously been set
for the blob, that value is stored.
:ivar str content_language:
If the content_language has previously been set
for the blob, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the blob, that value is stored.
:ivar str cache_control:
If the cache_control has previously been set for
the blob, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the blob, this response
header is stored so that the client can check for message content
integrity.
'''
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None):
self.content_type = content_type
self.content_encoding = content_encoding
self.content_language = content_language
self.content_disposition = content_disposition
self.cache_control = cache_control
self.content_md5 = content_md5
def _to_headers(self):
return {
'x-ms-blob-cache-control': _to_str(self.cache_control),
'x-ms-blob-content-type': _to_str(self.content_type),
'x-ms-blob-content-disposition': _to_str(self.content_disposition),
'x-ms-blob-content-md5': _to_str(self.content_md5),
'x-ms-blob-content-encoding': _to_str(self.content_encoding),
'x-ms-blob-content-language': _to_str(self.content_language),
}
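# Illustrative sketch (not part of the original source): building a ContentSettings
# object; _to_headers() maps it onto the x-ms-blob-* headers shown above. The
# values below are hypothetical placeholders.
#
# settings = ContentSettings(content_type='text/plain; charset=utf-8',
#                            cache_control='no-cache',
#                            content_disposition='attachment; filename="app.log"')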
class CopyProperties(object):
'''
Blob Copy Properties.
:ivar str id:
String identifier for the last attempted Copy Blob operation where this blob
was the destination blob. This header does not appear if this blob has never
been the destination in a Copy Blob operation, or if this blob has been
modified after a concluded Copy Blob operation using Set Blob Properties,
Put Blob, or Put Block List.
:ivar str source:
URL up to 2 KB in length that specifies the source blob used in the last attempted
Copy Blob operation where this blob was the destination blob. This header does not
appear if this blob has never been the destination in a Copy Blob operation, or if
this blob has been modified after a concluded Copy Blob operation using
Set Blob Properties, Put Blob, or Put Block List.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don't cause failure.
aborted:
Copy was ended by Abort Copy Blob.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy Blob operation where this blob was the destination blob. Can show
between 0 and Content-Length bytes copied.
:ivar datetime completion_time:
Conclusion time of the last attempted Copy Blob operation where this blob was the
destination blob. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
'''
def __init__(self):
self.id = None
self.source = None
self.status = None
self.progress = None
self.completion_time = None
self.status_description = None
class LeaseProperties(object):
'''
Blob Lease Properties.
:ivar str status:
The lease status of the blob.
Possible values: locked|unlocked
:ivar str state:
Lease state of the blob.
Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a blob is leased, specifies whether the lease is of infinite or fixed duration.
'''
def __init__(self):
self.status = None
self.state = None
self.duration = None
class BlobPrefix(object):
'''
BlobPrefix objects may potentially be returned in the blob list when
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is
used with a delimiter. Prefixes can be thought of as virtual blob directories.
:ivar str name: The name of the blob prefix.
'''
def __init__(self):
self.name = None
class BlobBlockState(object):
'''Block blob block types.'''
Committed = 'Committed'
'''Committed blocks.'''
Latest = 'Latest'
'''Latest blocks.'''
Uncommitted = 'Uncommitted'
'''Uncommitted blocks.'''
class BlobBlock(object):
'''
BlockBlob Block class.
:ivar str id:
Block id.
:ivar str state:
Block state.
Possible values: committed|uncommitted
:ivar int size:
Block size in bytes.
'''
def __init__(self, id=None, state=BlobBlockState.Latest):
self.id = id
self.state = state
def _set_size(self, size):
self.size = size
class BlobBlockList(object):
'''
Blob Block List class.
:ivar committed_blocks:
List of committed blocks.
:vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
:ivar uncommitted_blocks:
List of uncommitted blocks.
:vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
'''
def __init__(self):
self.committed_blocks = list()
self.uncommitted_blocks = list()
class PageRange(object):
'''
Page Range for page blob.
:ivar int start:
Start of page range in bytes.
:ivar int end:
End of page range in bytes.
:ivar bool is_cleared:
Indicates if a page range is cleared or not. Only applicable
for get_page_range_diff API.
'''
def __init__(self, start=None, end=None, is_cleared=False):
self.start = start
self.end = end
self.is_cleared = is_cleared
class ResourceProperties(object):
'''
Base response for a resource request.
:ivar str etag:
Opaque etag value that can be used to check if resource
has been modified.
:ivar datetime last_modified:
Datetime for last time resource was modified.
:ivar bool server_encrypted:
The value is set to true if the contents of the request are successfully
encrypted using the specified algorithm.
:ivar str encryption_key_sha256:
The server will echo the SHA256 of the customer-provided encryption key
to validate the key used in the operation.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.request_server_encrypted = None
self.encryption_key_sha256 = None
def clone(self, src):
self.last_modified = src.last_modified
self.etag = src.etag
self.request_server_encrypted = src.request_server_encrypted
self.encryption_key_sha256 = src.encryption_key_sha256
class AppendBlockProperties(ResourceProperties):
'''
Response for an append block request.
:ivar int append_offset:
Position to start next append.
:ivar int committed_block_count:
Number of committed append blocks.
'''
def __init__(self):
super(AppendBlockProperties, self).__init__()
self.append_offset = None
self.committed_block_count = None
class PageBlobProperties(ResourceProperties):
'''
Response for a page request.
:ivar int sequence_number:
Identifier for page blobs to help handle concurrent writes.
'''
def __init__(self):
super(PageBlobProperties, self).__init__()
self.sequence_number = None
class PublicAccess(object):
'''
Specifies whether data in the container may be accessed publicly and the level of access.
'''
OFF = 'off'
'''
Specifies that there is no public read access for either the container or the blobs within the container.
Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
'''
Blob = 'blob'
'''
Specifies public read access for blobs. Blob data within this container can be read
via anonymous request, but container data is not available. Clients cannot enumerate
blobs within the container via anonymous request.
'''
Container = 'container'
'''
Specifies full public read access for container and blob data. Clients can enumerate
blobs within the container via anonymous request, but cannot enumerate containers
within the storage account.
'''
class DeleteSnapshot(object):
'''
Required if the blob has associated snapshots. Specifies how to handle the snapshots.
'''
Include = 'include'
'''
Delete the base blob and all of its snapshots.
'''
Only = 'only'
'''
Delete only the blob's snapshots and not the blob itself.
'''
class BlockListType(object):
'''
Specifies whether to return the list of committed blocks, the list of uncommitted
blocks, or both lists together.
'''
All = 'all'
'''Both committed and uncommitted blocks.'''
Committed = 'committed'
'''Committed blocks.'''
Uncommitted = 'uncommitted'
'''Uncommitted blocks.'''
class SequenceNumberAction(object):
'''Sequence number actions.'''
Increment = 'increment'
'''
Increments the value of the sequence number by 1. If specifying this option,
do not include the x-ms-blob-sequence-number header.
'''
Max = 'max'
'''
Sets the sequence number to be the higher of the value included with the
request and the value currently stored for the blob.
'''
Update = 'update'
'''Sets the sequence number to the value included with the request.'''
class _LeaseActions(object):
'''Actions for a lease.'''
Acquire = 'acquire'
'''Acquire the lease.'''
Break = 'break'
'''Break the lease.'''
Change = 'change'
'''Change the lease ID.'''
Release = 'release'
'''Release the lease.'''
Renew = 'renew'
'''Renew the lease.'''
class _BlobTypes(object):
'''Blob type options.'''
AppendBlob = 'AppendBlob'
'''Append blob type.'''
BlockBlob = 'BlockBlob'
'''Block blob type.'''
PageBlob = 'PageBlob'
'''Page blob type.'''
class Include(object):
'''
Specifies the datasets to include in the blob list response.
:ivar ~azure.storage.blob.models.Include Include.COPY:
Specifies that metadata related to any current or previous Copy Blob operation
should be included in the response.
:ivar ~azure.storage.blob.models.Include Include.METADATA:
Specifies that metadata be returned in the response.
:ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS:
Specifies that snapshots should be included in the enumeration.
:ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS:
Specifies that blobs for which blocks have been uploaded, but which have not
been committed using Put Block List, be included in the response.
:ivar ~azure.storage.blob.models.Include Include.DELETED:
Specifies that deleted blobs should be returned in the response.
'''
def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
copy=False, deleted=False, _str=None):
'''
:param bool snapshots:
Specifies that snapshots should be included in the enumeration.
:param bool metadata:
Specifies that metadata be returned in the response.
:param bool uncommitted_blobs:
Specifies that blobs for which blocks have been uploaded, but which have
not been committed using Put Block List, be included in the response.
:param bool copy:
Specifies that metadata related to any current or previous Copy Blob
operation should be included in the response.
:param bool deleted:
Specifies that deleted blobs should be returned in the response.
:param str _str:
A string representing the includes.
'''
if not _str:
_str = ''
components = _str.split(',')
self.snapshots = snapshots or ('snapshots' in components)
self.metadata = metadata or ('metadata' in components)
self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
self.copy = copy or ('copy' in components)
self.deleted = deleted or ('deleted' in components)
def __or__(self, other):
return Include(_str=str(self) + str(other))
def __add__(self, other):
return Include(_str=str(self) + str(other))
def __str__(self):
include = (('snapshots,' if self.snapshots else '') +
('metadata,' if self.metadata else '') +
('uncommittedblobs,' if self.uncommitted_blobs else '') +
('copy,' if self.copy else '') +
('deleted,' if self.deleted else ''))
return include.rstrip(',')
Include.COPY = Include(copy=True)
Include.METADATA = Include(metadata=True)
Include.SNAPSHOTS = Include(snapshots=True)
Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
Include.DELETED = Include(deleted=True)
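# Illustrative sketch (not part of the original source): building an Include
# directly from keyword flags; __str__ joins the selected datasets with commas,
# e.g. for the list_blobs call referenced in the BlobPrefix docstring above.
#
# include = Include(snapshots=True, metadata=True)
# str(include)  # 'snapshots,metadata'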
class BlobPermissions(object):
'''
BlobPermissions class to be used with
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
:ivar BlobPermissions BlobPermissions.ADD:
Add a block to an append blob.
:ivar BlobPermissions BlobPermissions.CREATE:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:ivar BlobPermissions BlobPermissions.DELETE:
Delete the blob.
:ivar BlobPermissions BlobPermissions.READ:
Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
:ivar BlobPermissions BlobPermissions.WRITE:
Create or write content, properties, metadata, or block list. Snapshot or lease
the blob. Resize the blob (page blob only). Use the blob as the destination of a
copy operation within the same account.
'''
def __init__(self, read=False, add=False, create=False, write=False,
delete=False, _str=None):
'''
:param bool read:
Read the content, properties, metadata and block list. Use the blob as
the source of a copy operation.
:param bool add:
Add a block to an append blob.
:param bool create:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:param bool write:
Create or write content, properties, metadata, or block list. Snapshot
or lease the blob. Resize the blob (page blob only). Use the blob as the
destination of a copy operation within the same account.
:param bool delete:
Delete the blob.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.add = add or ('a' in _str)
self.create = create or ('c' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
def __or__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __add__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else ''))
BlobPermissions.ADD = BlobPermissions(add=True)
BlobPermissions.CREATE = BlobPermissions(create=True)
BlobPermissions.DELETE = BlobPermissions(delete=True)
BlobPermissions.READ = BlobPermissions(read=True)
BlobPermissions.WRITE = BlobPermissions(write=True)
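# Illustrative sketch (not part of the original source): permissions combine by
# string concatenation, so the 'r', 'a', and 'w' flags can be merged when
# generating a blob-level SAS.
#
# perms = BlobPermissions.READ | BlobPermissions.ADD | BlobPermissions.WRITE
# str(perms)  # 'raw'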
class ContainerPermissions(object):
'''
ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`.
:ivar ContainerPermissions ContainerPermissions.DELETE:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:ivar ContainerPermissions ContainerPermissions.LIST:
List blobs in the container.
:ivar ContainerPermissions ContainerPermissions.READ:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:ivar ContainerPermissions ContainerPermissions.WRITE:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
'''
def __init__(self, read=False, add=False, create=False, write=False, delete=False, list=False,
_str=None):
'''
:param bool read:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:param bool add:
Add a block to any append blob in the container.
:param bool create:
Write a new blob to the container, snapshot any blob in the container, or copy a blob to
a new blob in the container. Note: You cannot grant permissions to create a container
with a container SAS. Use an account SAS to create a container instead.
:param bool write:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
:param bool delete:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:param bool list:
List blobs in the container.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.add = add or ('a' in _str)
self.create = create or ('c' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
self.list = list or ('l' in _str)
def __or__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __add__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else ''))
ContainerPermissions.DELETE = ContainerPermissions(delete=True)
ContainerPermissions.LIST = ContainerPermissions(list=True)
ContainerPermissions.READ = ContainerPermissions(read=True)
ContainerPermissions.WRITE = ContainerPermissions(write=True)
ContainerPermissions.ADD = ContainerPermissions(add=True)
ContainerPermissions.CREATE = ContainerPermissions(create=True)
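# Illustrative sketch (not part of the original source): combining read and list
# access for a container-level SAS.
#
# perms = ContainerPermissions.READ | ContainerPermissions.LIST
# str(perms)  # 'rl'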
class PremiumPageBlobTier(object):
'''
Specifies the page blob tier to set the blob to. This is only applicable to page
blobs on premium storage accounts.
Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
for detailed information on the corresponding IOPS and throughput per PageBlobTier.
'''
P4 = 'P4'
''' P4 Tier '''
P6 = 'P6'
''' P6 Tier '''
P10 = 'P10'
''' P10 Tier '''
P20 = 'P20'
''' P20 Tier '''
P30 = 'P30'
''' P30 Tier '''
P40 = 'P40'
''' P40 Tier '''
P50 = 'P50'
''' P50 Tier '''
P60 = 'P60'
''' P60 Tier '''
class StandardBlobTier(object):
'''
Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts.
'''
Archive = 'Archive'
''' Archive '''
Cool = 'Cool'
''' Cool '''
Hot = 'Hot'
''' Hot '''
class RehydratePriority(object):
"""
Indicates the priority with which to rehydrate an archived blob
"""
Standard = 'Standard'
''' The rehydrate priority is standard. '''
High = 'High'
''' The rehydrate priority is high. '''
class AccountInformation(object):
"""
Holds information related to the storage account.
:ivar str sku_name:
Name of the storage SKU, also known as account type.
Example: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS, Premium_ZRS
:ivar str account_kind:
Describes the flavour of the storage account, also known as account kind.
Example: Storage, StorageV2, BlobStorage
"""
def __init__(self):
self.sku_name = None
self.account_kind = None
class UserDelegationKey(object):
"""
Represents a user delegation key, provided to the user by Azure Storage
based on their Azure Active Directory access token.
The fields are saved as simple strings since the user does not have to interact with this object;
to generate an identity SAS, the user can simply pass it to the right API.
:ivar str signed_oid:
Object ID of this token.
:ivar str signed_tid:
Tenant ID of the tenant that issued this token.
:ivar str signed_start:
The datetime this token becomes valid.
:ivar str signed_expiry:
The datetime this token expires.
:ivar str signed_service:
What service this key is valid for.
:ivar str signed_version:
The version identifier of the REST service that created this token.
:ivar str value:
The user delegation key.
"""
def __init__(self):
self.signed_oid = None
self.signed_tid = None
self.signed_start = None
self.signed_expiry = None
self.signed_service = None
self.signed_version = None
self.value = None
class BatchDeleteSubRequest(object):
"""
Represents one request in batch of multiple blob delete requests
Organizes HttpRequest objects together for batch REST operations to a single host endpoint.
:ivar str container_name:
Name of existing container.
:ivar str blob_name:
Name of existing blob.
:ivar str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to delete.
:ivar str lease_id:
Required if the blob has an active lease.
:ivar ~azure.storage.blob.models.DeleteSnapshot delete_snapshots:
Required if the blob has associated snapshots.
:ivar datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:ivar datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:ivar str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:ivar str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
"""
def __init__(self, container_name, blob_name, snapshot=None,
lease_id=None, delete_snapshots=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None):
self.container_name = container_name
self.blob_name = blob_name
self.snapshot = snapshot
self.lease_id = lease_id
self.delete_snapshots = delete_snapshots
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
self.if_match = if_match
self.if_none_match = if_none_match
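# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): building delete sub-requests for several blobs in
# one container. The container and blob names are placeholders; the resulting
# list would be handed to the blob service's batch delete operation.
def _example_build_delete_batch():
    return [
        BatchDeleteSubRequest('mycontainer', blob_name,
                              delete_snapshots='include')  # also remove snapshots
        for blob_name in ('logs/2024-01-01.log', 'logs/2024-01-02.log')
    ]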
class BatchSubResponse(object):
"""
Sub-response parsed from batch http sub-response
Organizes batch sub-response info and batch sub-request together for easier processing
:ivar bool is_successful:
Represents whether the batch sub-request was successful
:ivar :class:`~..common._http.HTTPResponse` http_response:
Parsed batch sub-response, in HTTPResponse format
:ivar batch_sub_request:
Represents the batch sub-request corresponding to the batch sub-response.
This could be any type of sub-request, for example :class:`~azure.storage.blob.models.BatchDeleteSubRequest`.
"""
def __init__(self, is_successful, http_response, batch_sub_request):
self.is_successful = is_successful
self.http_response = http_response
self.batch_sub_request = batch_sub_request
class BatchSetBlobTierSubRequest(object):
"""
Represents one request in batch of multiple set block blob tier requests
Organizes HttpRequest objects together for batch REST operations to a single host endpoint.
:ivar str container_name:
Name of existing container.
:ivar str blob_name:
Name of existing blob.
:ivar StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
"""
def __init__(self, container_name, blob_name, standard_blob_tier, rehydrate_priority=None):
self.container_name = container_name
self.blob_name = blob_name
self.standard_blob_tier = standard_blob_tier
self.rehydrate_priority = rehydrate_priority
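# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): tier-change sub-requests using the StandardBlobTier
# and RehydratePriority constants defined above. The container and blob names
# are placeholders.
def _example_build_set_tier_batch():
    return [
        # move a blob to the Cool tier
        BatchSetBlobTierSubRequest('mycontainer', 'archive/a.csv', StandardBlobTier.Cool),
        # rehydrate an archived blob back to Hot with high priority
        BatchSetBlobTierSubRequest('mycontainer', 'archive/b.csv', StandardBlobTier.Hot,
                                   rehydrate_priority=RehydratePriority.High),
    ]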
class CustomerProvidedEncryptionKey(object):
"""
All data in Azure Storage is encrypted at-rest using an account-level encryption key.
In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents
and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
When you use a customer-provided key, Azure Storage does not manage or persist your key.
When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
A SHA-256 hash of the encryption key is written alongside the blob contents,
and is used to verify that all subsequent operations against the blob use the same encryption key.
This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
When reading a blob, the provided key is used to decrypt your data after reading it from disk.
In both cases, the provided encryption key is securely discarded
as soon as the encryption or decryption process completes.
:ivar str key_value:
Base64-encoded AES-256 encryption key value.
:ivar str key_hash:
Base64-encoded SHA256 of the encryption key.
:ivar str algorithm:
Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
"""
def __init__(self, key_value, key_hash):
self.key_value = key_value
self.key_hash = key_hash
self.algorithm = 'AES256'
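# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): deriving the key/hash pair described above from 32
# random bytes. The hash is assumed to be the SHA-256 of the raw (unencoded) key
# bytes, matching the key hash the service uses to validate subsequent requests.
def _example_customer_provided_key():
    import base64
    import hashlib
    import os
    raw_key = os.urandom(32)  # 256-bit AES key
    key_value = base64.b64encode(raw_key).decode('utf-8')
    key_hash = base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8')
    return CustomerProvidedEncryptionKey(key_value, key_hash)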
class PathProperties(object):
"""
Represents a path's properties (only permissions and ACL at the moment).
The path can be either a directory or a file.
:ivar string owner:
Represents the owner of the path.
:ivar string group:
Represents the group of the path.
:ivar string permissions:
Represents the permissions of the path.
:ivar string acl:
Represents the acl of the path.
"""
def __init__(self):
self.owner = None
self.group = None
self.permissions = None
self.acl = None

Diff not shown due to its large size.


@ -1,275 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..common.sharedaccesssignature import (
SharedAccessSignature,
_SharedAccessHelper,
_QueryStringConstants,
_sign_string,
)
from ._constants import X_MS_VERSION
from ..common._serialization import (
url_quote,
)
class BlobSharedAccessSignature(SharedAccessSignature):
'''
Provides a factory for creating blob and container access
signature tokens with a common account name and account key. Users can either
use the factory or construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key=None, user_delegation_key=None):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
:param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
Instead of an account key, the user could pass in a user delegation key.
A user delegation key can be obtained from the service by authenticating with an AAD identity;
this can be accomplished by calling get_user_delegation_key on any Blob service object.
'''
super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
self.user_delegation_key = user_delegation_key
def generate_blob(self, container_name, blob_name, snapshot=None, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob or one of its snapshots.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to grant permission.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_container_acl on the container.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~..common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _BlobSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(id)
sas.add_resource('b' if snapshot is None else 'bs')
sas.add_timestamp(snapshot)
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, resource_path,
user_delegation_key=self.user_delegation_key)
return sas.get_token()
def generate_container(self, container_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the container.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param ContainerPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_container_acl on the container.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~..common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _BlobSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(id)
sas.add_resource('c')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, container_name,
user_delegation_key=self.user_delegation_key)
return sas.get_token()
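# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): using the factory above to issue a one-hour,
# read-only SAS for a single blob. The account name, key and blob path are
# placeholders; permission/expiry formats follow the generate_blob docstring.
def _example_generate_blob_sas():
    from datetime import datetime, timedelta
    sas = BlobSharedAccessSignature('myaccount', account_key='<base64-account-key>')
    token = sas.generate_blob('mycontainer', 'report.csv',
                              permission='r',
                              expiry=datetime.utcnow() + timedelta(hours=1))
    return 'https://myaccount.blob.core.windows.net/mycontainer/report.csv?' + token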
class _BlobQueryStringConstants(_QueryStringConstants):
SIGNED_TIMESTAMP = 'snapshot'
SIGNED_OID = 'skoid'
SIGNED_TID = 'sktid'
SIGNED_KEY_START = 'skt'
SIGNED_KEY_EXPIRY = 'ske'
SIGNED_KEY_SERVICE = 'sks'
SIGNED_KEY_VERSION = 'skv'
class _BlobSharedAccessHelper(_SharedAccessHelper):
def __init__(self):
super(_BlobSharedAccessHelper, self).__init__()
def add_timestamp(self, timestamp):
self._add_query(_BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp)
def get_value_to_append(self, query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None):
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/blob/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PERMISSION) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_START) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource)
if user_delegation_key is not None:
self._add_query(_BlobQueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid)
self._add_query(_BlobQueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid)
self._add_query(_BlobQueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start)
self._add_query(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry)
self._add_query(_BlobQueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service)
self._add_query(_BlobQueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version)
string_to_sign += \
(self.get_value_to_append(_BlobQueryStringConstants.SIGNED_OID) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TID) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_START) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_SERVICE) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_VERSION))
else:
string_to_sign += self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IDENTIFIER)
string_to_sign += \
(self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IP) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PROTOCOL) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_VERSION) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_RESOURCE) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TIMESTAMP) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CACHE_CONTROL) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_ENCODING) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_TYPE))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_BlobQueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key if user_delegation_key is None else user_delegation_key.value,
string_to_sign))
def get_token(self):
# a conscious decision was made to exclude the timestamp in the generated token
# this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp
exclude = [_BlobQueryStringConstants.SIGNED_TIMESTAMP]
return '&'.join(['{0}={1}'.format(n, url_quote(v))
for n, v in self.query_dict.items() if v is not None and n not in exclude])


@ -1,39 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ._constants import (
__author__,
__version__,
DEFAULT_X_MS_VERSION,
)
from .cloudstorageaccount import CloudStorageAccount
from .models import (
RetentionPolicy,
Logging,
Metrics,
CorsRule,
DeleteRetentionPolicy,
StaticWebsite,
ServiceProperties,
AccessPolicy,
ResourceTypes,
Services,
AccountPermissions,
Protocol,
ServiceStats,
GeoReplication,
LocationMode,
RetryContext,
)
from .retry import (
ExponentialRetry,
LinearRetry,
no_retry,
)
from .sharedaccesssignature import (
SharedAccessSignature,
)
from .tokencredential import TokenCredential
from ._error import AzureSigningError


@ -1,127 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ._common_conversion import (
_sign_string,
)
from ._constants import (
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_SECONDARY_NAME
)
import sys
if sys.version_info >= (3,):
from urllib.parse import parse_qsl
else:
from urlparse import parse_qsl
import logging
logger = logging.getLogger(__name__)
from ._error import (
AzureSigningError,
_wrap_exception,
)
class _StorageSharedKeyAuthentication(object):
def __init__(self, account_name, account_key, is_emulated=False):
self.account_name = account_name
self.account_key = account_key
self.is_emulated = is_emulated
def _get_headers(self, request, headers_to_sign):
headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
if 'content-length' in headers and headers['content-length'] == '0':
del headers['content-length']
return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
def _get_verb(self, request):
return request.method + '\n'
def _get_canonicalized_resource(self, request):
uri_path = request.path.split('?')[0]
# for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME
# as this is how the emulator works
if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1:
# only replace the first instance
uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
return '/' + self.account_name + uri_path
def _get_canonicalized_headers(self, request):
string_to_sign = ''
x_ms_headers = []
for name, value in request.headers.items():
if name.startswith('x-ms-'):
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value is not None:
string_to_sign += ''.join([name, ':', value, '\n'])
return string_to_sign
def _add_authorization_header(self, request, string_to_sign):
try:
signature = _sign_string(self.account_key, string_to_sign)
auth_string = 'SharedKey ' + self.account_name + ':' + signature
request.headers['Authorization'] = auth_string
except Exception as ex:
# Wrap any error that occurred as signing error
# Doing so will clarify/locate the source of problem
raise _wrap_exception(ex, AzureSigningError)
def sign_request(self, request):
string_to_sign = \
self._get_verb(request) + \
self._get_headers(
request,
[
'content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since',
'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
]
) + \
self._get_canonicalized_headers(request) + \
self._get_canonicalized_resource(request) + \
self._get_canonicalized_resource_query(request)
self._add_authorization_header(request, string_to_sign)
logger.debug("String_to_sign=%s", string_to_sign)
def _get_canonicalized_resource_query(self, request):
sorted_queries = [(name, value) for name, value in request.query.items()]
sorted_queries.sort()
string_to_sign = ''
for name, value in sorted_queries:
if value is not None:
string_to_sign += '\n' + name.lower() + ':' + value
return string_to_sign
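# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): signing a minimal fake request with the Shared Key
# authenticator above. The request shape (method/path/headers/query attributes)
# mirrors what the vendored HTTP layer supplies; the account key is a dummy
# base64 value, not a real credential.
def _example_sign_request():
    import base64

    class _FakeRequest(object):
        def __init__(self):
            self.method = 'GET'
            self.path = '/mycontainer/myblob.txt'
            self.headers = {'x-ms-version': '2019-02-02',
                            'x-ms-date': 'Mon, 01 Jan 2024 00:00:00 GMT'}
            self.query = {}

    auth = _StorageSharedKeyAuthentication('myaccount',
                                           base64.b64encode(b'0' * 32).decode('utf-8'))
    request = _FakeRequest()
    auth.sign_request(request)  # adds the 'Authorization: SharedKey ...' header
    return request.headers['Authorization']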
class _StorageNoAuthentication(object):
def sign_request(self, request):
pass
class _StorageSASAuthentication(object):
def __init__(self, sas_token):
# ignore ?-prefix (added by tools such as Azure Portal) on sas tokens
# doing so avoids double question marks when signing
if sas_token[0] == '?':
sas_token = sas_token[1:]
self.sas_qs = parse_qsl(sas_token)
def sign_request(self, request):
# if 'sig' is present, then the request has already been signed
# as is the case when performing retries
if 'sig' in request.query:
return
request.query.update(self.sas_qs)


@ -1,126 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import base64
import hashlib
import hmac
import sys
from io import (SEEK_SET)
from dateutil.tz import tzutc
from ._error import (
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
)
from .models import (
_unicode_type,
)
if sys.version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
def _int_to_str(value):
return str(int(value)) if value is not None else None
def _bool_to_str(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _to_utc_datetime(value):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _datetime_to_utc_string(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value is None:
return None
if value.tzinfo:
value = value.astimezone(tzutc())
return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
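# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): the HMAC-SHA256 signing above, applied to a made-up
# string-to-sign with a dummy base64 key (not a real credential).
def _example_sign_string():
    dummy_key = _encode_base64(b'0' * 32)
    return _sign_string(dummy_key, 'GET\n\napplication/octet-stream\n...')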
def _get_content_md5(data):
md5 = hashlib.md5()
if isinstance(data, bytes):
md5.update(data)
elif hasattr(data, 'read'):
pos = 0
try:
pos = data.tell()
except:
pass
for chunk in iter(lambda: data.read(4096), b""):
md5.update(chunk)
try:
data.seek(pos, SEEK_SET)
except (AttributeError, IOError):
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
else:
raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
return base64.b64encode(md5.digest()).decode('utf-8')
def _lower(text):
return text.lower()


@ -1,161 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info >= (3,):
from urllib.parse import urlparse
else:
from urlparse import urlparse
from ._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_SECONDARY_NAME,
DEV_ACCOUNT_KEY,
DEV_BLOB_HOST,
DEV_QUEUE_HOST,
)
from ._error import (
_ERROR_STORAGE_MISSING_INFO,
)
_EMULATOR_ENDPOINTS = {
'blob': DEV_BLOB_HOST,
'queue': DEV_QUEUE_HOST,
'file': '',
}
_CONNECTION_ENDPOINTS = {
'blob': 'BlobEndpoint',
'queue': 'QueueEndpoint',
'file': 'FileEndpoint',
}
_CONNECTION_ENDPOINTS_SECONDARY = {
'blob': 'BlobSecondaryEndpoint',
'queue': 'QueueSecondaryEndpoint',
'file': 'FileSecondaryEndpoint',
}
class _ServiceParameters(object):
def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, custom_domain_secondary=None):
self.account_name = account_name
self.account_key = account_key
self.sas_token = sas_token
self.token_credential = token_credential
self.protocol = protocol or DEFAULT_PROTOCOL
self.is_emulated = is_emulated
if is_emulated:
self.account_name = DEV_ACCOUNT_NAME
self.protocol = 'http'
# Only set the account key if a sas_token is not present to allow sas to be used with the emulator
self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
emulator_endpoint = _EMULATOR_ENDPOINTS[service] if custom_domain is None else custom_domain
self.primary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_NAME)
self.secondary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_SECONDARY_NAME)
else:
# Strip whitespace from the key
if self.account_key:
self.account_key = self.account_key.strip()
endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
# Setup the primary endpoint
if custom_domain:
parsed_url = urlparse(custom_domain)
# Trim any trailing slashes from the path
path = parsed_url.path.rstrip('/')
self.primary_endpoint = parsed_url.netloc + path
self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
else:
if not self.account_name:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
# Setup the secondary endpoint
if custom_domain_secondary:
if not custom_domain:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
parsed_url = urlparse(custom_domain_secondary)
# Trim any trailing slashes from the path
path = parsed_url.path.rstrip('/')
self.secondary_endpoint = parsed_url.netloc + path
else:
if self.account_name:
self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
else:
self.secondary_endpoint = None
@staticmethod
def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential=None,
is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
request_session=None, connection_string=None, socket_timeout=None):
if connection_string:
params = _ServiceParameters._from_connection_string(connection_string, service)
elif is_emulated:
params = _ServiceParameters(service, is_emulated=True, custom_domain=custom_domain)
elif account_name:
if protocol.lower() != 'https' and token_credential is not None:
raise ValueError("Token credential is only supported with HTTPS.")
params = _ServiceParameters(service,
account_name=account_name,
account_key=account_key,
sas_token=sas_token,
token_credential=token_credential,
is_emulated=is_emulated,
protocol=protocol,
endpoint_suffix=endpoint_suffix,
custom_domain=custom_domain)
else:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
params.request_session = request_session
params.socket_timeout = socket_timeout
return params
@staticmethod
def _from_connection_string(connection_string, service):
# Split into key=value pairs removing empties, then split the pairs into a dict
config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
# Authentication
account_name = config.get('AccountName')
account_key = config.get('AccountKey')
sas_token = config.get('SharedAccessSignature')
# Emulator
is_emulated = config.get('UseDevelopmentStorage')
# Basic URL Configuration
protocol = config.get('DefaultEndpointsProtocol')
endpoint_suffix = config.get('EndpointSuffix')
# Custom URLs
endpoint = config.get(_CONNECTION_ENDPOINTS[service])
endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
return _ServiceParameters(service,
account_name=account_name,
account_key=account_key,
sas_token=sas_token,
is_emulated=is_emulated,
protocol=protocol,
endpoint_suffix=endpoint_suffix,
custom_domain=endpoint,
custom_domain_secondary=endpoint_secondary)
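# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): the kind of connection string parsed above. The
# account name and key are placeholders; for the 'blob' service the derived
# primary endpoint would be 'myaccount.blob.core.windows.net'.
def _example_from_connection_string():
    conn_str = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
                'AccountKey=<base64-account-key>;EndpointSuffix=core.windows.net')
    return _ServiceParameters._from_connection_string(conn_str, 'blob')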


@ -1,52 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import platform
import sys
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '2.1.0'
# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package
USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
platform.python_version(), platform.system(),
platform.release())
# default values for common package, in case it is used directly
DEFAULT_X_MS_VERSION = '2019-02-02'
DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)
# Live ServiceClient URLs
SERVICE_HOST_BASE = 'core.windows.net'
DEFAULT_PROTOCOL = 'https'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# Socket timeout in seconds
DEFAULT_SOCKET_TIMEOUT = 20
# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
# The socket timeout is now the maximum total duration to send all data.
if sys.version_info >= (3, 5):
# the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
# the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
DEFAULT_SOCKET_TIMEOUT = (20, 2000)
# Encryption constants
_ENCRYPTION_PROTOCOL_V1 = '1.0'
_AUTHORIZATION_HEADER_NAME = 'Authorization'
_COPY_SOURCE_HEADER_NAME = 'x-ms-copy-source'
_REDACTED_VALUE = 'REDACTED'
_CLIENT_REQUEST_ID_HEADER_NAME = 'x-ms-client-request-id'


@ -1,396 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from dateutil import parser
from ._common_conversion import _to_str
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from .models import (
ServiceProperties,
Logging,
Metrics,
CorsRule,
AccessPolicy,
_dict,
GeoReplication,
ServiceStats,
DeleteRetentionPolicy,
StaticWebsite,
)
def _to_int(value):
return value if value is None else int(value)
def _bool(value):
return value.lower() == 'true'
def _to_upper_str(value):
return _to_str(value).upper() if value is not None else None
def _get_download_size(start_range, end_range, resource_size):
if start_range is not None:
end_range = end_range if end_range else (resource_size if resource_size else None)
if end_range is not None:
return end_range - start_range
else:
return None
else:
return resource_size
GET_PROPERTIES_ATTRIBUTE_MAP = {
'last-modified': (None, 'last_modified', parser.parse),
'etag': (None, 'etag', _to_str),
'x-ms-blob-type': (None, 'blob_type', _to_str),
'content-length': (None, 'content_length', _to_int),
'content-range': (None, 'content_range', _to_str),
'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int),
'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int),
'x-ms-blob-public-access': (None, 'public_access', _to_str),
'x-ms-access-tier': (None, 'blob_tier', _to_str),
'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
'x-ms-archive-status': (None, 'rehydration_status', _to_str),
'x-ms-share-quota': (None, 'quota', _to_int),
'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
'x-ms-encryption-key-sha256': (None, 'encryption_key_sha256', _to_str),
'x-ms-creation-time': (None, 'creation_time', parser.parse),
'content-type': ('content_settings', 'content_type', _to_str),
'cache-control': ('content_settings', 'cache_control', _to_str),
'content-encoding': ('content_settings', 'content_encoding', _to_str),
'content-disposition': ('content_settings', 'content_disposition', _to_str),
'content-language': ('content_settings', 'content_language', _to_str),
'content-md5': ('content_settings', 'content_md5', _to_str),
'x-ms-lease-status': ('lease', 'status', _to_str),
'x-ms-lease-state': ('lease', 'state', _to_str),
'x-ms-lease-duration': ('lease', 'duration', _to_str),
'x-ms-copy-id': ('copy', 'id', _to_str),
'x-ms-copy-source': ('copy', 'source', _to_str),
'x-ms-copy-status': ('copy', 'status', _to_str),
'x-ms-copy-progress': ('copy', 'progress', _to_str),
'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool),
'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool),
'x-ms-file-attributes': ('smb_properties', 'ntfs_attributes', _to_str),
'x-ms-file-creation-time': ('smb_properties', 'creation_time', parser.parse, True),
'x-ms-file-last-write-time': ('smb_properties', 'last_write_time', parser.parse, True),
'x-ms-file-change-time': ('smb_properties', 'change_time', parser.parse, True),
'x-ms-file-permission-key': ('smb_properties', 'permission_key', _to_str),
'x-ms-file-id': ('smb_properties', 'file_id', _to_str),
'x-ms-file-parent-id': ('smb_properties', 'parent_id', _to_str),
}
def _parse_metadata(response):
'''
Extracts out resource metadata information.
'''
if response is None or response.headers is None:
return None
metadata = _dict()
for key, value in response.headers.items():
if key.lower().startswith('x-ms-meta-'):
metadata[key[10:]] = _to_str(value)
return metadata
def _parse_properties(response, result_class):
'''
Extracts out resource properties and metadata information.
Ignores the standard http headers.
'''
if response is None or response.headers is None:
return None
props = result_class()
for key, value in response.headers.items():
info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
if info:
if info[0] is None:
setattr(props, info[1], info[2](value))
else:
attr = getattr(props, info[0])
# if info[3] is True, time zones in parsed strings are ignored and a naive :class:`datetime` object
# will be returned.
ignoretz = info[3] if len(info) > 3 else False
header_value = info[2](value, ignoretz=ignoretz) if info[2] is parser.parse else info[2](value)
setattr(attr, info[1], header_value)
if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
props.blob_tier = _to_upper_str(props.blob_tier)
return props
def _parse_length_from_content_range(content_range):
'''
Parses the blob length from the content range header: bytes 1-3/65537
'''
if content_range is None:
return None
# First, split in space and take the second half: '1-3/65537'
# Next, split on slash and take the second half: '65537'
# Finally, convert to an int: 65537
return int(content_range.split(' ', 1)[1].split('/', 1)[1])
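# Worked example for the parsing above (added for documentation purposes only,
# not part of the original vendored module): the total resource length is the
# value after the '/' in the Content-Range header.
def _example_parse_content_range():
    assert _parse_length_from_content_range('bytes 1-3/65537') == 65537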
def _convert_xml_to_signed_identifiers(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<SignedIdentifiers>
<SignedIdentifier>
<Id>unique-value</Id>
<AccessPolicy>
<Start>start-time</Start>
<Expiry>expiry-time</Expiry>
<Permission>abbreviated-permission-list</Permission>
</AccessPolicy>
</SignedIdentifier>
</SignedIdentifiers>
'''
if response is None or response.body is None:
return None
list_element = ETree.fromstring(response.body)
signed_identifiers = _dict()
for signed_identifier_element in list_element.findall('SignedIdentifier'):
# Id element
id = signed_identifier_element.find('Id').text
# Access policy element
access_policy = AccessPolicy()
access_policy_element = signed_identifier_element.find('AccessPolicy')
if access_policy_element is not None:
start_element = access_policy_element.find('Start')
if start_element is not None:
access_policy.start = parser.parse(start_element.text)
expiry_element = access_policy_element.find('Expiry')
if expiry_element is not None:
access_policy.expiry = parser.parse(expiry_element.text)
access_policy.permission = access_policy_element.findtext('Permission')
signed_identifiers[id] = access_policy
return signed_identifiers
def _convert_xml_to_service_stats(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceStats>
<GeoReplication>
<Status>live|bootstrap|unavailable</Status>
<LastSyncTime>sync-time|<empty></LastSyncTime>
</GeoReplication>
</StorageServiceStats>
'''
if response is None or response.body is None:
return None
service_stats_element = ETree.fromstring(response.body)
geo_replication_element = service_stats_element.find('GeoReplication')
geo_replication = GeoReplication()
geo_replication.status = geo_replication_element.find('Status').text
last_sync_time = geo_replication_element.find('LastSyncTime').text
geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None
service_stats = ServiceStats()
service_stats.geo_replication = geo_replication
return service_stats
def _convert_xml_to_service_properties(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
'''
if response is None or response.body is None:
return None
service_properties_element = ETree.fromstring(response.body)
service_properties = ServiceProperties()
# Logging
logging = service_properties_element.find('Logging')
if logging is not None:
service_properties.logging = Logging()
service_properties.logging.version = logging.find('Version').text
service_properties.logging.delete = _bool(logging.find('Delete').text)
service_properties.logging.read = _bool(logging.find('Read').text)
service_properties.logging.write = _bool(logging.find('Write').text)
_convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
service_properties.logging.retention_policy)
# HourMetrics
hour_metrics_element = service_properties_element.find('HourMetrics')
if hour_metrics_element is not None:
service_properties.hour_metrics = Metrics()
_convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
# MinuteMetrics
minute_metrics_element = service_properties_element.find('MinuteMetrics')
if minute_metrics_element is not None:
service_properties.minute_metrics = Metrics()
_convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
# CORS
cors = service_properties_element.find('Cors')
if cors is not None:
service_properties.cors = list()
for rule in cors.findall('CorsRule'):
allowed_origins = rule.find('AllowedOrigins').text.split(',')
allowed_methods = rule.find('AllowedMethods').text.split(',')
max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
exposed_headers = rule.find('ExposedHeaders').text
if exposed_headers is not None:
cors_rule.exposed_headers = exposed_headers.split(',')
allowed_headers = rule.find('AllowedHeaders').text
if allowed_headers is not None:
cors_rule.allowed_headers = allowed_headers.split(',')
service_properties.cors.append(cors_rule)
# Target version
target_version = service_properties_element.find('DefaultServiceVersion')
if target_version is not None:
service_properties.target_version = target_version.text
# DeleteRetentionPolicy
delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
if delete_retention_policy_element is not None:
service_properties.delete_retention_policy = DeleteRetentionPolicy()
policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
service_properties.delete_retention_policy.enabled = policy_enabled
if policy_enabled:
service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)
# StaticWebsite
static_website_element = service_properties_element.find('StaticWebsite')
if static_website_element is not None:
service_properties.static_website = StaticWebsite()
service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text)
index_document_element = static_website_element.find('IndexDocument')
if index_document_element is not None:
service_properties.static_website.index_document = index_document_element.text
error_document_element = static_website_element.find('ErrorDocument404Path')
if error_document_element is not None:
service_properties.static_website.error_document_404_path = error_document_element.text
return service_properties
def _convert_xml_to_metrics(xml, metrics):
'''
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
'''
# Version
metrics.version = xml.find('Version').text
# Enabled
metrics.enabled = _bool(xml.find('Enabled').text)
# IncludeAPIs
include_apis_element = xml.find('IncludeAPIs')
if include_apis_element is not None:
metrics.include_apis = _bool(include_apis_element.text)
# RetentionPolicy
_convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
def _convert_xml_to_retention_policy(xml, retention_policy):
'''
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
'''
# Enabled
retention_policy.enabled = _bool(xml.find('Enabled').text)
# Days
days_element = xml.find('Days')
if days_element is not None:
retention_policy.days = int(days_element.text)


@ -1,233 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from collections import OrderedDict
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from ._common_conversion import (
_encode_base64,
_decode_base64_to_bytes,
)
from ._constants import (
_ENCRYPTION_PROTOCOL_V1,
__version__,
)
from ._error import (
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
_validate_not_none,
_validate_encryption_protocol_version,
_validate_key_encryption_key_unwrap,
_validate_kek_id,
)
class _EncryptionAlgorithm(object):
'''
Specifies which client encryption algorithm is used.
'''
AES_CBC_256 = 'AES_CBC_256'
class _WrappedContentKey:
'''
Represents the envelope key details stored on the service.
'''
def __init__(self, algorithm, encrypted_key, key_id):
'''
:param str algorithm:
The algorithm used for wrapping.
:param bytes encrypted_key:
The encrypted content-encryption-key.
:param str key_id:
The key-encryption-key identifier string.
'''
_validate_not_none('algorithm', algorithm)
_validate_not_none('encrypted_key', encrypted_key)
_validate_not_none('key_id', key_id)
self.algorithm = algorithm
self.encrypted_key = encrypted_key
self.key_id = key_id
class _EncryptionAgent:
'''
Represents the encryption agent stored on the service.
It consists of the encryption protocol version and encryption algorithm used.
'''
def __init__(self, encryption_algorithm, protocol):
'''
:param _EncryptionAlgorithm encryption_algorithm:
The algorithm used for encrypting the message contents.
:param str protocol:
The protocol version used for encryption.
'''
_validate_not_none('encryption_algorithm', encryption_algorithm)
_validate_not_none('protocol', protocol)
self.encryption_algorithm = str(encryption_algorithm)
self.protocol = protocol
class _EncryptionData:
'''
Represents the encryption data that is stored on the service.
'''
def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
key_wrapping_metadata):
'''
:param bytes content_encryption_IV:
The content encryption initialization vector.
:param _EncryptionAgent encryption_agent:
The encryption agent.
:param _WrappedContentKey wrapped_content_key:
An object that stores the wrapping algorithm, the key identifier,
and the encrypted key bytes.
:param dict key_wrapping_metadata:
A dict containing metadata related to the key wrapping.
'''
_validate_not_none('content_encryption_IV', content_encryption_IV)
_validate_not_none('encryption_agent', encryption_agent)
_validate_not_none('wrapped_content_key', wrapped_content_key)
self.content_encryption_IV = content_encryption_IV
self.encryption_agent = encryption_agent
self.wrapped_content_key = wrapped_content_key
self.key_wrapping_metadata = key_wrapping_metadata
def _generate_encryption_data_dict(kek, cek, iv):
'''
Generates and returns the encryption metadata as a dict.
:param object kek: The key encryption key. See calling functions for more information.
:param bytes cek: The content encryption key.
:param bytes iv: The initialization vector.
:return: A dict containing all the encryption metadata.
:rtype: dict
'''
# Encrypt the cek.
wrapped_cek = kek.wrap_key(cek)
# Build the encryption_data dict.
# Use OrderedDict to comply with Java's ordering requirement.
wrapped_content_key = OrderedDict()
wrapped_content_key['KeyId'] = kek.get_kid()
wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
encryption_agent = OrderedDict()
encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
encryption_data_dict = OrderedDict()
encryption_data_dict['WrappedContentKey'] = wrapped_content_key
encryption_data_dict['EncryptionAgent'] = encryption_agent
encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}
return encryption_data_dict
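# Illustrative sketch added for documentation purposes only (not part of the
# original vendored module): the minimal key-encryption-key (kek) interface
# expected by _generate_encryption_data_dict above. A real kek would wrap the
# content key with Key Vault or another KMS; this toy wrapper is for shape only
# and is NOT secure.
def _example_generate_encryption_metadata():
    import os

    class _ToyKeyWrapper(object):
        def __init__(self, kid):
            self._kid = kid

        def wrap_key(self, cek):
            return cek[::-1]  # placeholder "wrapping", not real cryptography

        def get_kid(self):
            return self._kid

        def get_key_wrap_algorithm(self):
            return 'example-reverse'

    cek = os.urandom(32)
    iv = os.urandom(16)
    return _generate_encryption_data_dict(_ToyKeyWrapper('my-kek-id'), cek, iv)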
def _dict_to_encryption_data(encryption_data_dict):
'''
Converts the specified dictionary to an EncryptionData object for
eventual use in decryption.
:param dict encryption_data_dict:
The dictionary containing the encryption data.
:return: an _EncryptionData object built from the dictionary.
:rtype: _EncryptionData
'''
try:
if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
except KeyError:
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
wrapped_content_key = encryption_data_dict['WrappedContentKey']
wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
_decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
wrapped_content_key['KeyId'])
encryption_agent = encryption_data_dict['EncryptionAgent']
encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
encryption_agent['Protocol'])
if 'KeyWrappingMetadata' in encryption_data_dict:
key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
else:
key_wrapping_metadata = None
encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
encryption_agent,
wrapped_content_key,
key_wrapping_metadata)
return encryption_data
def _generate_AES_CBC_cipher(cek, iv):
'''
Generates and returns an encryption cipher for AES CBC using the given cek and iv.
:param bytes[] cek: The content encryption key for the cipher.
:param bytes[] iv: The initialization vector for the cipher.
:return: A cipher for encrypting in AES256 CBC.
:rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
'''
backend = default_backend()
algorithm = AES(cek)
mode = CBC(iv)
return Cipher(algorithm, mode, backend)
def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
'''
Extracts and returns the content_encryption_key stored in the encryption_data object
and performs necessary validation on all parameters.
:param _EncryptionData encryption_data:
The encryption metadata of the retrieved value.
:param obj key_encryption_key:
The key_encryption_key used to unwrap the cek. Please refer to high-level service object
instance variables for more details.
:param func key_resolver:
A function used that, given a key_id, will return a key_encryption_key. Please refer
to high-level service object instance variables for more details.
:return: the content_encryption_key stored in the encryption_data object.
:rtype: bytes[]
'''
_validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
_validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
_validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)
content_encryption_key = None
# If the resolver exists, give priority to the key it finds.
if key_resolver is not None:
key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
_validate_not_none('key_encryption_key', key_encryption_key)
_validate_key_encryption_key_unwrap(key_encryption_key)
_validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())
# Will throw an exception if the specified algorithm is not supported.
content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
encryption_data.wrapped_content_key.algorithm)
_validate_not_none('content_encryption_key', content_encryption_key)
return content_encryption_key


@ -1,218 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from sys import version_info
if version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
AzureException,
)
from ._constants import (
_ENCRYPTION_PROTOCOL_V1,
)
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
'The emulator does not support the file service.'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_START_END_NEEDED_FOR_MD5 = \
'Both end_range and start_range need to be specified ' + \
'for getting content MD5.'
_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
'Getting content MD5 for a range greater than 4MB ' + \
'is not supported.'
_ERROR_MD5_MISMATCH = \
'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
_ERROR_TOO_MANY_ACCESS_POLICIES = \
'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
_ERROR_OBJECT_INVALID = \
'{0} does not define a complete interface. Value of {1} is either missing or invalid.'
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
'Encryption version is not supported.'
_ERROR_DECRYPTION_FAILURE = \
'Decryption failed'
_ERROR_ENCRYPTION_REQUIRED = \
'Encryption required but no key was provided.'
_ERROR_DECRYPTION_REQUIRED = \
'Decryption required but neither key nor resolver was provided.' + \
' If you do not want to decrypt, please do not set the require encryption flag.'
_ERROR_INVALID_KID = \
'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
'Specified encryption algorithm is not supported.'
_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
' for this method.'
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
'Data was either not encrypted or metadata has been lost.'
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, AzureConflictHttpError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, AzureMissingResourceHttpError):
return False
else:
raise error
def _http_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
error_code = None
if 'x-ms-error-code' in http_error.respheader:
error_code = http_error.respheader['x-ms-error-code']
message += ' ErrorCode: ' + error_code
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
ex = AzureHttpError(message, http_error.status)
ex.error_code = error_code
raise ex
def _validate_type_bytes(param_name, param):
if not isinstance(param, bytes):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_type_bytes_or_stream(param_name, param):
if not (isinstance(param, bytes) or hasattr(param, 'read')):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
def _validate_not_none(param_name, param):
if param is None:
raise ValueError(_ERROR_VALUE_NONE.format(param_name))
def _validate_content_match(server_md5, computed_md5):
if server_md5 != computed_md5:
raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
def _validate_access_policies(identifiers):
if identifiers and len(identifiers) > 5:
raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
def _validate_key_encryption_key_wrap(kek):
# Note that None is not callable and so will fail the second clause of each check.
if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
def _validate_key_encryption_key_unwrap(kek):
if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
def _validate_encryption_required(require_encryption, kek):
if require_encryption and (kek is None):
raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
def _validate_decryption_required(require_encryption, kek, resolver):
if (require_encryption and (kek is None) and
(resolver is None)):
raise ValueError(_ERROR_DECRYPTION_REQUIRED)
def _validate_encryption_protocol_version(encryption_protocol):
if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
def _validate_kek_id(kid, resolved_id):
if not (kid == resolved_id):
raise ValueError(_ERROR_INVALID_KID)
def _validate_encryption_unsupported(require_encryption, key_encryption_key):
if require_encryption or (key_encryption_key is not None):
raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
def _validate_user_delegation_key(user_delegation_key):
_validate_not_none('user_delegation_key.signed_oid', user_delegation_key.signed_oid)
_validate_not_none('user_delegation_key.signed_tid', user_delegation_key.signed_tid)
_validate_not_none('user_delegation_key.signed_start', user_delegation_key.signed_start)
_validate_not_none('user_delegation_key.signed_expiry', user_delegation_key.signed_expiry)
_validate_not_none('user_delegation_key.signed_version', user_delegation_key.signed_version)
_validate_not_none('user_delegation_key.signed_service', user_delegation_key.signed_service)
_validate_not_none('user_delegation_key.value', user_delegation_key.value)
# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
msg = ""
if len(ex.args) > 0:
msg = ex.args[0]
if version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
return desired_type(msg)
else:
# There isn't a good way in Python 2 to keep the stack trace
# that does not itself cause an error under Python 3.
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
class AzureSigningError(AzureException):
"""
Represents a fatal error when attempting to sign a request.
In general, the cause of this exception is user error. For example, the given account key is not valid.
Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
"""
pass
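Since this helper module is being removed, here is a minimal standalone sketch of the re-typing pattern _wrap_exception implements; the stand-in exception class and message below are illustrative only and not taken from the removed code.

class SampleSigningError(Exception):
    """Hypothetical stand-in for AzureSigningError in this sketch."""

def wrap_exception(ex, desired_type):
    # Keep only the message; Python 3 exception chaining preserves the trace.
    return desired_type(ex.args[0] if ex.args else "")

try:
    raise ValueError("account key is not valid base64")
except ValueError as err:
    wrapped = wrap_exception(err, SampleSigningError)
    print(type(wrapped).__name__, wrapped)  # SampleSigningError account key is not valid base64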


@ -1,74 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class HTTPError(Exception):
'''
Represents an HTTP Exception when response status code >= 300.
:ivar int status:
the status code of the response
:ivar str message:
the message
:ivar list headers:
the returned headers, as a list of (name, value) pairs
:ivar bytes body:
the body of the response
'''
def __init__(self, status, message, respheader, respbody):
self.status = status
self.respheader = respheader
self.respbody = respbody
Exception.__init__(self, message)
class HTTPResponse(object):
'''
Represents a response from an HTTP request.
:ivar int status:
the status code of the response
:ivar str message:
the message
:ivar dict headers:
the returned headers
:ivar bytes body:
the body of the response
'''
def __init__(self, status, message, headers, body):
self.status = status
self.message = message
self.headers = headers
self.body = body
class HTTPRequest(object):
'''
Represents an HTTP Request.
:ivar str host:
the host name to connect to
:ivar str method:
the method to use to connect (string such as GET, POST, PUT, etc.)
:ivar str path:
the uri fragment
:ivar dict query:
query parameters
:ivar dict headers:
header values
:ivar bytes body:
the body of the request.
'''
def __init__(self):
self.host = ''
self.method = ''
self.path = ''
self.query = {} # list of (name, value)
self.headers = {} # list of (header name, header value)
self.body = ''
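For context, a hedged sketch of how the removed pipeline populated an HTTPRequest before signing and sending it; the endpoint, API version, and query values are illustrative placeholders, not taken from the removed code.

# Illustrative only -- assumes the HTTPRequest class defined above.
request = HTTPRequest()
request.host = 'myaccount.blob.core.windows.net'
request.method = 'GET'
request.path = '/'
request.query = {'restype': 'service', 'comp': 'properties'}
request.headers = {'x-ms-version': '2018-03-28'}
request.body = b''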


@ -1,107 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
from . import HTTPResponse
from .._serialization import _get_data_bytes_or_stream_only
logger = logging.getLogger(__name__)
class _HTTPClient(object):
'''
Takes the request and sends it to cloud service and returns the response.
'''
def __init__(self, protocol=None, session=None, timeout=None):
'''
:param str protocol:
http or https.
:param requests.Session session:
session object created with requests library (or compatible).
:param int timeout:
timeout for the http request, in seconds.
'''
self.protocol = protocol
self.session = session
self.timeout = timeout
# By default, requests adds an Accept:*/* and Accept-Encoding to the session,
# which causes issues with some Azure REST APIs. Removing these here gives us
# the flexibility to add it back on a case by case basis.
if 'Accept' in self.session.headers:
del self.session.headers['Accept']
if 'Accept-Encoding' in self.session.headers:
del self.session.headers['Accept-Encoding']
self.proxies = None
def set_proxy(self, host, port, user, password):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
Note that we set the proxies directly on the request later on, rather than
on the session object, because requests has a bug where the session proxy is
ignored in favor of the environment proxy. So, auth will not work unless it is
passed directly when making the request, as this overrides both.
:param str host:
Address of the proxy. Ex: '192.168.0.100'
:param int port:
Port of the proxy. Ex: 6000
:param str user:
User for proxy authorization.
:param str password:
Password for proxy authorization.
'''
if user and password:
proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
else:
proxy_string = '{}:{}'.format(host, port)
self.proxies = {'http': 'http://{}'.format(proxy_string),
'https': 'https://{}'.format(proxy_string)}
def perform_request(self, request):
'''
Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If
the response code indicates an error, raise an HTTPError.
:param HTTPRequest request:
The request to serialize and send.
:return: An HTTPResponse containing the parsed HTTP response.
:rtype: :class:`~azure.storage.common._http.HTTPResponse`
'''
# Verify the body is either bytes or a file-like/stream object
if request.body:
request.body = _get_data_bytes_or_stream_only('request.body', request.body)
# Construct the URI
uri = self.protocol.lower() + '://' + request.host + request.path
# Send the request
response = self.session.request(request.method,
uri,
params=request.query,
headers=request.headers,
data=request.body or None,
timeout=self.timeout,
proxies=self.proxies)
# Parse the response
status = int(response.status_code)
response_headers = {}
for key, name in response.headers.items():
# Preserve the case of metadata
if key.lower().startswith('x-ms-meta-'):
response_headers[key] = name
else:
response_headers[key.lower()] = name
wrap = HTTPResponse(status, response.reason, response_headers, response.content)
response.close()
return wrap
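A brief usage sketch, assuming the removed _HTTPClient above and a standard requests.Session; the proxy address and timeout are placeholders.

import requests

client = _HTTPClient(protocol='https', session=requests.Session(), timeout=65)
client.set_proxy('192.168.0.100', 6000, user=None, password=None)
# client.proxies is now:
# {'http': 'http://192.168.0.100:6000', 'https': 'https://192.168.0.100:6000'}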


@ -1,372 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import uuid
from datetime import date
from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
from os import fstat
from time import time
from wsgiref.handlers import format_date_time
from dateutil.tz import tzutc
if sys.version_info >= (3,):
from urllib.parse import quote as url_quote
else:
from urllib2 import quote as url_quote
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from ._error import (
_ERROR_VALUE_SHOULD_BE_BYTES,
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
)
from .models import (
_unicode_type,
)
from ._common_conversion import (
_str,
)
from ._constants import _CLIENT_REQUEST_ID_HEADER_NAME
def _to_utc_datetime(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value.tzinfo:
value = value.astimezone(tzutc())
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _update_request(request, x_ms_version, user_agent_string):
# Verify body
if request.body:
request.body = _get_data_bytes_or_stream_only('request.body', request.body)
length = _len_plus(request.body)
# The only scenario where this is plausible is if the stream object is not seekable.
if length is None:
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)
# if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers['Content-Length'] = str(length)
# append additional headers based on the service
request.headers['x-ms-version'] = x_ms_version
request.headers['User-Agent'] = user_agent_string
request.headers[_CLIENT_REQUEST_ID_HEADER_NAME] = str(uuid.uuid1())
# If the host has a path component (e.g. local storage), move it
path = request.host.split('/', 1)
if len(path) == 2:
request.host = path[0]
request.path = '/{}{}'.format(path[1], request.path)
# Encode and optionally add local storage prefix to path
request.path = url_quote(request.path, '/()$=\',~')
def _add_metadata_headers(metadata, request):
if metadata:
if not request.headers:
request.headers = {}
for name, value in metadata.items():
request.headers['x-ms-meta-' + name] = value
def _add_date_header(request):
current_time = format_date_time(time())
request.headers['x-ms-date'] = current_time
def _get_data_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_data_bytes_or_stream_only(param_name, param_value):
'''Validates the request body passed in is a stream/file-like or bytes
object.'''
if param_value is None:
return b''
if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None we'll
return an empty byte string; bytes and streams are passed through
unchanged; text is encoded as UTF-8; anything else is converted to
a string first and then encoded.'''
if request_body is None:
return b''
if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
def _convert_signed_identifiers_to_xml(signed_identifiers):
if signed_identifiers is None:
return ''
sis = ETree.Element('SignedIdentifiers')
for id, access_policy in signed_identifiers.items():
# Root signed identifiers element
si = ETree.SubElement(sis, 'SignedIdentifier')
# Id element
ETree.SubElement(si, 'Id').text = id
# Access policy element
policy = ETree.SubElement(si, 'AccessPolicy')
if access_policy.start:
start = access_policy.start
if isinstance(access_policy.start, date):
start = _to_utc_datetime(start)
ETree.SubElement(policy, 'Start').text = start
if access_policy.expiry:
expiry = access_policy.expiry
if isinstance(access_policy.expiry, date):
expiry = _to_utc_datetime(expiry)
ETree.SubElement(policy, 'Expiry').text = expiry
if access_policy.permission:
ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
except:
raise
finally:
output = stream.getvalue()
stream.close()
return output
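A sketch of what the helper above emits for a single stored access policy; the policy object below is a hypothetical stand-in exposing only the three attributes the function reads, and the output shown in the comment is approximate.

class _PolicyStub:  # hypothetical stand-in for AccessPolicy in this sketch
    start = '2020-01-01T00:00:00Z'
    expiry = '2020-01-02T00:00:00Z'
    permission = 'r'

xml_bytes = _convert_signed_identifiers_to_xml({'policy-1': _PolicyStub()})
# roughly: b"<?xml version='1.0' encoding='utf-8'?><SignedIdentifiers><SignedIdentifier>
#   <Id>policy-1</Id><AccessPolicy><Start>2020-01-01T00:00:00Z</Start>
#   <Expiry>2020-01-02T00:00:00Z</Expiry><Permission>r</Permission>
#   </AccessPolicy></SignedIdentifier></SignedIdentifiers>"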
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
cors, target_version=None, delete_retention_policy=None, static_website=None):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
'''
service_properties_element = ETree.Element('StorageServiceProperties')
# Logging
if logging:
logging_element = ETree.SubElement(service_properties_element, 'Logging')
ETree.SubElement(logging_element, 'Version').text = logging.version
ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
ETree.SubElement(logging_element, 'Read').text = str(logging.read)
ETree.SubElement(logging_element, 'Write').text = str(logging.write)
retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
_convert_retention_policy_to_xml(logging.retention_policy, retention_element)
# HourMetrics
if hour_metrics:
hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
_convert_metrics_to_xml(hour_metrics, hour_metrics_element)
# MinuteMetrics
if minute_metrics:
minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
_convert_metrics_to_xml(minute_metrics, minute_metrics_element)
# CORS
# Make sure to still serialize empty list
if cors is not None:
cors_element = ETree.SubElement(service_properties_element, 'Cors')
for rule in cors:
cors_rule = ETree.SubElement(cors_element, 'CorsRule')
ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
# Target version
if target_version:
ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
# DeleteRetentionPolicy
if delete_retention_policy:
policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
if delete_retention_policy.enabled:
ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
# StaticWebsite
if static_website:
static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
if static_website.enabled:
if static_website.index_document is not None:
ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
if static_website.error_document_404_path is not None:
ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
str(static_website.error_document_404_path)
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
method='xml')
except:
raise
finally:
output = stream.getvalue()
stream.close()
return output
def _convert_metrics_to_xml(metrics, root):
'''
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
'''
# Version
ETree.SubElement(root, 'Version').text = metrics.version
# Enabled
ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
# IncludeAPIs
if metrics.enabled and metrics.include_apis is not None:
ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
# RetentionPolicy
retention_element = ETree.SubElement(root, 'RetentionPolicy')
_convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
def _convert_retention_policy_to_xml(retention_policy, root):
'''
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
'''
# Enabled
ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
# Days
if retention_policy.enabled and retention_policy.days:
ETree.SubElement(root, 'Days').text = str(retention_policy.days)
def _len_plus(data):
length = None
# Check if object implements the __len__ method, covers most input cases such as bytearray.
try:
length = len(data)
except:
pass
if not length:
# Check if the stream is a file-like stream object.
# If so, calculate the size using the file descriptor.
try:
fileno = data.fileno()
except (AttributeError, UnsupportedOperation):
pass
else:
return fstat(fileno).st_size
# If the stream is seekable and tell() is implemented, calculate the stream size.
try:
current_position = data.tell()
data.seek(0, SEEK_END)
length = data.tell() - current_position
data.seek(current_position, SEEK_SET)
except (AttributeError, UnsupportedOperation):
pass
return length
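A standalone sketch of the stream-length trick used by _len_plus above (seek to the end, measure, seek back); it relies only on the standard library and does not depend on the removed module.

import io

stream = io.BytesIO(b'hello world')
stream.seek(3)                       # pretend 3 bytes were already consumed
pos = stream.tell()
stream.seek(0, io.SEEK_END)
remaining = stream.tell() - pos      # bytes left from the current position
stream.seek(pos, io.SEEK_SET)
print(remaining)                     # 8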


@ -1,198 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# Note that we import BlobService/QueueService/FileService on demand
# because this module is imported by azure/storage/__init__
# ie. we don't want 'import azure.storage' to trigger an automatic import
# of blob/queue/file packages.
from azure.storage.common._error import _validate_not_none
from azure.storage.common.models import (
ResourceTypes,
Services,
AccountPermissions,
)
from azure.storage.common.sharedaccesssignature import (
SharedAccessSignature,
)
class CloudStorageAccount(object):
"""
Provides a factory for creating the blob, queue, and file services
with a common account name and account key or sas token. Users can either
use the factory or can construct the appropriate service directly.
"""
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=None, endpoint_suffix=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless is_emulated is used.
:param str account_key:
The storage account key. This is used for shared key authentication.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use a sovereign cloud.
'''
self.account_name = account_name
self.account_key = account_key
self.sas_token = sas_token
self.is_emulated = is_emulated
self.endpoint_suffix = endpoint_suffix
def create_block_blob_service(self):
'''
Creates a BlockBlobService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
'''
try:
from azure.storage.blob.blockblobservice import BlockBlobService
return BlockBlobService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-blob is required. '
+ 'Please install it using "pip install azure-storage-blob"')
def create_page_blob_service(self):
'''
Creates a PageBlobService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
'''
try:
from azure.storage.blob.pageblobservice import PageBlobService
return PageBlobService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-blob is required. '
+ 'Please install it using "pip install azure-storage-blob"')
def create_append_blob_service(self):
'''
Creates an AppendBlobService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
'''
try:
from azure.storage.blob.appendblobservice import AppendBlobService
return AppendBlobService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-blob is required. '
+ 'Please install it using "pip install azure-storage-blob"')
def create_queue_service(self):
'''
Creates a QueueService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.queue.queueservice.QueueService`
'''
try:
from azure.storage.queue.queueservice import QueueService
return QueueService(self.account_name, self.account_key,
sas_token=self.sas_token,
is_emulated=self.is_emulated,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-queue is required. '
+ 'Please install it using "pip install azure-storage-queue"')
def create_file_service(self):
'''
Creates a FileService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.file.fileservice.FileService`
'''
try:
from azure.storage.file.fileservice import FileService
return FileService(self.account_name, self.account_key,
sas_token=self.sas_token,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-file is required. '
+ 'Please install it using "pip install azure-storage-file"')
def generate_shared_access_signature(self, services, resource_types,
permission, expiry, start=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value.
'''
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_account(services, resource_types, permission,
expiry, start=start, ip=ip, protocol=protocol)
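A hedged usage sketch for this factory: it assumes the legacy azure-storage-blob (pre-12.x) package and the vendored common models are importable, and the account name, key, and expiry below are placeholders.

account = CloudStorageAccount(account_name='myaccount', account_key='<base64-key>')
blob_service = account.create_block_blob_service()
sas_token = account.generate_shared_access_signature(
    Services.BLOB,
    ResourceTypes.OBJECT,
    AccountPermissions.READ,
    expiry='2021-01-01T00:00:00Z')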


@ -1,672 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info < (3,):
from collections import Iterable
_unicode_type = unicode
else:
from collections.abc import Iterable
_unicode_type = str
from ._error import (
_validate_not_none
)
class _HeaderDict(dict):
def __getitem__(self, index):
return super(_HeaderDict, self).__getitem__(index.lower())
class _list(list):
'''Used so that additional properties can be set on the return list'''
pass
class _dict(dict):
'''Used so that additional properties can be set on the return dictionary'''
pass
class _OperationContext(object):
'''
Contains information that lasts the lifetime of an operation. This operation
may span multiple calls to the Azure service.
:ivar bool location_lock:
Whether the location should be locked for this operation.
:ivar str location:
The location to lock to.
'''
def __init__(self, location_lock=False):
self.location_lock = location_lock
self.host_location = None
class ListGenerator(Iterable):
'''
A generator object used to list storage resources. The generator will lazily
follow the continuation tokens returned by the service and stop when all
resources have been returned or max_results is reached.
If max_results is specified and the account has more than that number of
resources, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
'''
def __init__(self, resources, list_method, list_args, list_kwargs):
self.items = resources
self.next_marker = resources.next_marker
self._list_method = list_method
self._list_args = list_args
self._list_kwargs = list_kwargs
def __iter__(self):
# return results
for i in self.items:
yield i
while True:
# if no more results on the service, return
if not self.next_marker:
break
# update the marker args
self._list_kwargs['marker'] = self.next_marker
# handle max results, if present
max_results = self._list_kwargs.get('max_results')
if max_results is not None:
max_results = max_results - len(self.items)
# if we've reached max_results, return
# else, update the max_results arg
if max_results <= 0:
break
else:
self._list_kwargs['max_results'] = max_results
# get the next segment
resources = self._list_method(*self._list_args, **self._list_kwargs)
self.items = resources
self.next_marker = resources.next_marker
# return results
for i in self.items:
yield i
class RetryContext(object):
'''
Contains the request and response information that can be used to determine
whether and how to retry. This context is stored across retries and may be
used to store other information relevant to the retry strategy.
:ivar ~azure.storage.common._http.HTTPRequest request:
The request sent to the storage service.
:ivar ~azure.storage.common._http.HTTPResponse response:
The response returned by the storage service.
:ivar LocationMode location_mode:
The location the request was sent to.
:ivar Exception exception:
The exception that just occurred. The type could either be AzureException (for HTTP errors),
or other Exception types from lower layers, which are kept unwrapped for easier processing.
:ivar bool is_emulated:
Whether retry is targeting the emulator. The default value is False.
:ivar int body_position:
The initial position of the body stream. It is useful when retries happen and we need to rewind the stream.
'''
def __init__(self):
self.request = None
self.response = None
self.location_mode = None
self.exception = None
self.is_emulated = False
self.body_position = None
class LocationMode(object):
'''
Specifies the location the request should be sent to. This mode only applies
for RA-GRS accounts which allow secondary read access. All other account types
must use PRIMARY.
'''
PRIMARY = 'primary'
''' Requests should be sent to the primary location. '''
SECONDARY = 'secondary'
''' Requests should be sent to the secondary location, if possible. '''
class RetentionPolicy(object):
'''
By default, Storage Analytics will not delete any logging or metrics data. Blobs
will continue to be written until the shared 20TB limit is
reached. Once the 20TB limit is reached, Storage Analytics will stop writing
new data and will not resume until free space is available. This 20TB limit
is independent of the total limit for your storage account.
There are two ways to delete Storage Analytics data: by manually making deletion
requests or by setting a data retention policy. Manual requests to delete Storage
Analytics data are billable, but delete requests resulting from a retention policy
are not billable.
'''
def __init__(self, enabled=False, days=None):
'''
:param bool enabled:
Indicates whether a retention policy is enabled for the
storage service. If disabled, logging and metrics data will be retained
infinitely by the service unless explicitly deleted.
:param int days:
Required if enabled is true. Indicates the number of
days that metrics or logging data should be retained. All data older
than this value will be deleted. The minimum value you can specify is 1;
the largest value is 365 (one year).
'''
_validate_not_none("enabled", enabled)
if enabled:
_validate_not_none("days", days)
self.enabled = enabled
self.days = days
class Logging(object):
'''
Storage Analytics logs detailed information about successful and failed requests
to a storage service. This information can be used to monitor individual requests
and to diagnose issues with a storage service. Requests are logged on a best-effort
basis.
All logs are stored in block blobs in a container named $logs, which is
automatically created when Storage Analytics is enabled for a storage account.
The $logs container is located in the blob namespace of the storage account.
This container cannot be deleted once Storage Analytics has been enabled, though
its contents can be deleted.
For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
'''
def __init__(self, delete=False, read=False, write=False,
retention_policy=None):
'''
:param bool delete:
Indicates whether all delete requests should be logged.
:param bool read:
Indicates whether all read requests should be logged.
:param bool write:
Indicates whether all write requests should be logged.
:param RetentionPolicy retention_policy:
The retention policy for the metrics.
'''
_validate_not_none("read", read)
_validate_not_none("write", write)
_validate_not_none("delete", delete)
self.version = u'1.0'
self.delete = delete
self.read = read
self.write = write
self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
class Metrics(object):
'''
Metrics include aggregated transaction statistics and capacity data about requests
to a storage service. Transactions are reported at both the API operation level
as well as at the storage service level, and capacity is reported at the storage
service level. Metrics data can be used to analyze storage service usage, diagnose
issues with requests made against the storage service, and to improve the
performance of applications that use a service.
For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
'''
def __init__(self, enabled=False, include_apis=None,
retention_policy=None):
'''
:param bool enabled:
Indicates whether metrics are enabled for
the service.
:param bool include_apis:
Required if enabled is True. Indicates whether metrics
should generate summary statistics for called API operations.
:param RetentionPolicy retention_policy:
The retention policy for the metrics.
'''
_validate_not_none("enabled", enabled)
if enabled:
_validate_not_none("include_apis", include_apis)
self.version = u'1.0'
self.enabled = enabled
self.include_apis = include_apis
self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
class CorsRule(object):
'''
CORS is an HTTP feature that enables a web application running under one domain
to access resources in another domain. Web browsers implement a security
restriction known as same-origin policy that prevents a web page from calling
APIs in a different domain; CORS provides a secure way to allow one domain
(the origin domain) to call APIs in another domain.
For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
'''
def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
exposed_headers=None, allowed_headers=None):
'''
:param allowed_origins:
A list of origin domains that will be allowed via CORS, or "*" to allow
all domains. The list must contain at least one entry. Limited to 64
origin domains. Each allowed origin can have up to 256 characters.
:type allowed_origins: list(str)
:param allowed_methods:
A list of HTTP methods that are allowed to be executed by the origin.
The list must contain at least one entry. For Azure Storage,
permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
:type allowed_methods: list(str)
:param int max_age_in_seconds:
The number of seconds that the client/browser should cache a
preflight response.
:param exposed_headers:
Defaults to an empty list. A list of response headers to expose to CORS
clients. Limited to 64 defined headers and two prefixed headers. Each
header can be up to 256 characters.
:type exposed_headers: list(str)
:param allowed_headers:
Defaults to an empty list. A list of headers allowed to be part of
the cross-origin request. Limited to 64 defined headers and 2 prefixed
headers. Each header can be up to 256 characters.
:type allowed_headers: list(str)
'''
_validate_not_none("allowed_origins", allowed_origins)
_validate_not_none("allowed_methods", allowed_methods)
_validate_not_none("max_age_in_seconds", max_age_in_seconds)
self.allowed_origins = allowed_origins if allowed_origins else list()
self.allowed_methods = allowed_methods if allowed_methods else list()
self.max_age_in_seconds = max_age_in_seconds
self.exposed_headers = exposed_headers if exposed_headers else list()
self.allowed_headers = allowed_headers if allowed_headers else list()
class DeleteRetentionPolicy(object):
'''
To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later.
This class groups the settings related to delete retention policy.
'''
def __init__(self, enabled=False, days=None):
'''
:param bool enabled:
Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by a delete operation.
:param int days:
Required only if Enabled is true. Indicates the number of days that a deleted blob should be retained.
All data older than this value will be permanently deleted.
The minimum value you can specify is 1; the largest value is 365.
'''
_validate_not_none("enabled", enabled)
if enabled:
_validate_not_none("days", days)
self.enabled = enabled
self.days = days
class StaticWebsite(object):
'''
Class representing the service properties pertaining to static websites.
To set StaticWebsite, you must call Set Blob Service Properties using version 2018-03-28 or later.
'''
def __init__(self, enabled=False, index_document=None, error_document_404_path=None):
'''
:param bool enabled:
Required. True if static websites should be enabled on the blob service for the corresponding Storage Account.
:param str index_document:
Represents the name of the index document. This is commonly "index.html".
:param str error_document_404_path:
Represents the path to the error document that should be shown when an error 404 is issued,
in other words, when a browser requests a page that does not exist.
'''
_validate_not_none("enabled", enabled)
self.enabled = enabled
self.index_document = index_document
self.error_document_404_path = error_document_404_path
class ServiceProperties(object):
'''
Returned by get_*_service_properties functions. Contains the properties of a
storage service, including Analytics and CORS rules.
Azure Storage Analytics performs logging and provides metrics data for a storage
account. You can use this data to trace requests, analyze usage trends, and
diagnose issues with your storage account. To use Storage Analytics, you must
enable it individually for each service you want to monitor.
The aggregated data is stored in a well-known blob (for logging) and in well-known
tables (for metrics), which may be accessed using the Blob service and Table
service APIs.
For an in-depth guide on using Storage Analytics and other tools to identify,
diagnose, and troubleshoot Azure Storage-related issues, see
http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
'''
pass
class ServiceStats(object):
'''
Returned by get_*_service_stats functions. Contains statistics related to
replication for the given service. It is only available when read-access
geo-redundant replication is enabled for the storage account.
:ivar GeoReplication geo_replication:
An object containing statistics related to replication for the given service.
'''
pass
class GeoReplication(object):
'''
Contains statistics related to replication for the given service.
:ivar str status:
The status of the secondary location. Possible values are:
live: Indicates that the secondary location is active and operational.
bootstrap: Indicates initial synchronization from the primary location
to the secondary location is in progress. This typically occurs
when replication is first enabled.
unavailable: Indicates that the secondary location is temporarily
unavailable.
:ivar date last_sync_time:
A GMT date value, to the second. All primary writes preceding this value
are guaranteed to be available for read operations at the secondary.
Primary writes after this point in time may or may not be available for
reads. The value may be empty if LastSyncTime is not available. This can
happen if the replication status is bootstrap or unavailable. Although
geo-replication is continuously enabled, the LastSyncTime result may
reflect a cached value from the service that is refreshed every few minutes.
'''
pass
class AccessPolicy(object):
'''
Access Policy class used by the set and get acl methods in each service.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
'''
def __init__(self, permission=None, expiry=None, start=None):
'''
:param str permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
'''
self.start = start
self.expiry = expiry
self.permission = permission
class Protocol(object):
'''
Specifies the protocol permitted for a SAS token. Note that HTTP only is
not allowed.
'''
HTTPS = 'https'
''' Allow HTTPS requests only. '''
HTTPS_HTTP = 'https,http'
''' Allow HTTP and HTTPS requests. '''
class ResourceTypes(object):
'''
Specifies the resource types that are accessible with the account SAS.
:ivar ResourceTypes ResourceTypes.CONTAINER:
Access to container-level APIs (e.g., Create/Delete Container,
Create/Delete Queue, Create/Delete Share,
List Blobs/Files and Directories)
:ivar ResourceTypes ResourceTypes.OBJECT:
Access to object-level APIs for blobs, queue messages, and
files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
:ivar ResourceTypes ResourceTypes.SERVICE:
Access to service-level APIs (e.g., Get/Set Service Properties,
Get Service Stats, List Containers/Queues/Shares)
'''
def __init__(self, service=False, container=False, object=False, _str=None):
'''
:param bool service:
Access to service-level APIs (e.g., Get/Set Service Properties,
Get Service Stats, List Containers/Queues/Shares)
:param bool container:
Access to container-level APIs (e.g., Create/Delete Container,
Create/Delete Queue, Create/Delete Share,
List Blobs/Files and Directories)
:param bool object:
Access to object-level APIs for blobs, queue messages, and
files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
:param str _str:
A string representing the resource types.
'''
if not _str:
_str = ''
self.service = service or ('s' in _str)
self.container = container or ('c' in _str)
self.object = object or ('o' in _str)
def __or__(self, other):
return ResourceTypes(_str=str(self) + str(other))
def __add__(self, other):
return ResourceTypes(_str=str(self) + str(other))
def __str__(self):
return (('s' if self.service else '') +
('c' if self.container else '') +
('o' if self.object else ''))
ResourceTypes.SERVICE = ResourceTypes(service=True)
ResourceTypes.CONTAINER = ResourceTypes(container=True)
ResourceTypes.OBJECT = ResourceTypes(object=True)
class Services(object):
'''
Specifies the services accessible with the account SAS.
:ivar Services Services.BLOB: The blob service.
:ivar Services Services.FILE: The file service
:ivar Services Services.QUEUE: The queue service.
:ivar Services Services.TABLE: The table service.
'''
def __init__(self, blob=False, queue=False, file=False, table=False, _str=None):
'''
:param bool blob:
Access to any blob service, for example, the `.BlockBlobService`
:param bool queue:
Access to the `.QueueService`
:param bool file:
Access to the `.FileService`
:param bool table:
Access to the TableService
:param str _str:
A string representing the services.
'''
if not _str:
_str = ''
self.blob = blob or ('b' in _str)
self.queue = queue or ('q' in _str)
self.file = file or ('f' in _str)
self.table = table or ('t' in _str)
def __or__(self, other):
return Services(_str=str(self) + str(other))
def __add__(self, other):
return Services(_str=str(self) + str(other))
def __str__(self):
return (('b' if self.blob else '') +
('q' if self.queue else '') +
('t' if self.table else '') +
('f' if self.file else ''))
Services.BLOB = Services(blob=True)
Services.QUEUE = Services(queue=True)
Services.TABLE = Services(table=True)
Services.FILE = Services(file=True)
class AccountPermissions(object):
'''
:class:`~ResourceTypes` class to be used with generate_shared_access_signature
method and for the AccessPolicies used with set_*_acl. There are two types of
SAS which may be used to grant resource access. One is to grant access to a
specific resource (resource-specific). Another is to grant access to the
entire service for a specific account and allow certain operations based on
the permissions found here.
:ivar AccountPermissions AccountPermissions.ADD:
Valid for the following Object resource types only: queue messages and append blobs.
:ivar AccountPermissions AccountPermissions.CREATE:
Valid for the following Object resource types only: blobs and files. Users
can create new blobs or files, but may not overwrite existing blobs or files.
:ivar AccountPermissions AccountPermissions.DELETE:
Valid for Container and Object resource types, except for queue messages.
:ivar AccountPermissions AccountPermissions.LIST:
Valid for Service and Container resource types only.
:ivar AccountPermissions AccountPermissions.PROCESS:
Valid for the following Object resource type only: queue messages.
:ivar AccountPermissions AccountPermissions.READ:
Valid for all signed resources types (Service, Container, and Object).
Permits read permissions to the specified resource type.
:ivar AccountPermissions AccountPermissions.UPDATE:
Valid for the following Object resource types only: queue messages.
:ivar AccountPermissions AccountPermissions.WRITE:
Valid for all signed resources types (Service, Container, and Object).
Permits write permissions to the specified resource type.
'''
def __init__(self, read=False, write=False, delete=False, list=False,
add=False, create=False, update=False, process=False, _str=None):
'''
:param bool read:
Valid for all signed resources types (Service, Container, and Object).
Permits read permissions to the specified resource type.
:param bool write:
Valid for all signed resources types (Service, Container, and Object).
Permits write permissions to the specified resource type.
:param bool delete:
Valid for Container and Object resource types, except for queue messages.
:param bool list:
Valid for Service and Container resource types only.
:param bool add:
Valid for the following Object resource types only: queue messages, and append blobs.
:param bool create:
Valid for the following Object resource types only: blobs and files.
Users can create new blobs or files, but may not overwrite existing
blobs or files.
:param bool update:
Valid for the following Object resource types only: queue messages.
:param bool process:
Valid for the following Object resource type only: queue messages.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
self.list = list or ('l' in _str)
self.add = add or ('a' in _str)
self.create = create or ('c' in _str)
self.update = update or ('u' in _str)
self.process = process or ('p' in _str)
def __or__(self, other):
return AccountPermissions(_str=str(self) + str(other))
def __add__(self, other):
return AccountPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('u' if self.update else '') +
('p' if self.process else ''))
AccountPermissions.READ = AccountPermissions(read=True)
AccountPermissions.WRITE = AccountPermissions(write=True)
AccountPermissions.DELETE = AccountPermissions(delete=True)
AccountPermissions.LIST = AccountPermissions(list=True)
AccountPermissions.ADD = AccountPermissions(add=True)
AccountPermissions.CREATE = AccountPermissions(create=True)
AccountPermissions.UPDATE = AccountPermissions(update=True)
AccountPermissions.PROCESS = AccountPermissions(process=True)
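For reference, a small sketch of how the flag classes above compose with `|` and serialize into the single-letter strings used in SAS tokens (assuming the removed models module is importable):

perms = AccountPermissions.READ | AccountPermissions.LIST
print(str(perms))                                           # 'rl'
print(str(Services.BLOB | Services.QUEUE))                  # 'bq'
print(str(ResourceTypes.CONTAINER | ResourceTypes.OBJECT))  # 'co'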


@ -1,306 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from abc import ABCMeta
from math import pow
import random
from io import (SEEK_SET, UnsupportedOperation)
from .models import LocationMode
from ._constants import (
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_SECONDARY_NAME
)
class _Retry(object):
'''
The base class for Exponential and Linear retries containing shared code.
'''
__metaclass__ = ABCMeta
def __init__(self, max_attempts, retry_to_secondary):
'''
Constructs a base retry object.
:param int max_attempts:
The maximum number of retry attempts.
:param bool retry_to_secondary:
Whether the request should be retried to secondary, if able. This should
only be enabled if RA-GRS accounts are used and potentially stale data
can be handled.
'''
self.max_attempts = max_attempts
self.retry_to_secondary = retry_to_secondary
def _should_retry(self, context):
'''
A function which determines whether or not to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:return:
A boolean indicating whether or not to retry the request.
:rtype: bool
'''
# If max attempts are reached, do not retry.
if context.count >= self.max_attempts:
return False
status = None
if context.response and context.response.status:
status = context.response.status
if status is None:
'''
If status is None, retry as this request triggered an exception. For
example, network issues would trigger this.
'''
return True
elif 200 <= status < 300:
'''
This method is called after a successful response, meaning we failed
during the response body download or parsing. So, success codes should
be retried.
'''
return True
elif 300 <= status < 500:
'''
An exception occurred, but in most cases it was expected. Examples could
include a 409 Conflict or 412 Precondition Failed.
'''
if status == 404 and context.location_mode == LocationMode.SECONDARY:
# Response code 404 should be retried if secondary was used.
return True
if status == 408:
# Response code 408 is a timeout and should be retried.
return True
return False
elif status >= 500:
'''
Response codes of 500 and above, with the exception of 501 Not Implemented and
505 Version Not Supported, indicate a server issue and should be retried.
'''
if status == 501 or status == 505:
return False
return True
else:
# If something else happened, it's unexpected. Retry.
return True
def _set_next_host_location(self, context):
'''
A function which sets the next host location on the request, if applicable.
:param ~azure.storage.models.RetryContext context:
The retry context containing the previous host location and the request
to evaluate and possibly modify.
'''
if len(context.request.host_locations) > 1:
# If there's more than one possible location, retry to the alternative
if context.location_mode == LocationMode.PRIMARY:
context.location_mode = LocationMode.SECONDARY
# if targeting the emulator (with path style), change path instead of host
if context.is_emulated:
# replace the first instance of primary account name with the secondary account name
context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1)
else:
context.request.host = context.request.host_locations.get(context.location_mode)
else:
context.location_mode = LocationMode.PRIMARY
# if targeting the emulator (with path style), change path instead of host
if context.is_emulated:
# replace the first instance of secondary account name with the primary account name
context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
else:
context.request.host = context.request.host_locations.get(context.location_mode)
def _retry(self, context, backoff):
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
# If the context does not contain a count parameter, this request has not
# been retried yet. Add the count parameter to track the number of retries.
if not hasattr(context, 'count'):
context.count = 0
# Determine whether to retry, and if so increment the count, modify the
# request as desired, and return the backoff.
if self._should_retry(context):
backoff_interval = backoff(context)
context.count += 1
# If retry to secondary is enabled, attempt to change the host if the
# request allows it
if self.retry_to_secondary:
self._set_next_host_location(context)
# rewind the request body if it is a stream
if hasattr(context.request, 'body') and hasattr(context.request.body, 'read'):
# if no position was saved, retry would not work
if context.body_position is None:
return None
else:
try:
# attempt to rewind the body to the initial position
context.request.body.seek(context.body_position, SEEK_SET)
except UnsupportedOperation:
# if body is not seekable, then retry would not work
return None
return backoff_interval
return None
class ExponentialRetry(_Retry):
'''
Exponential retry.
'''
def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3,
retry_to_secondary=False, random_jitter_range=3):
'''
Constructs an Exponential retry object. The initial_backoff is used for
the first retry. Subsequent retries are attempted after initial_backoff +
increment_base^retry_count seconds. For example, by default the first retry
occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
third after (15+3^2) = 24 seconds.
:param int initial_backoff:
The initial backoff interval, in seconds, for the first retry.
:param int increment_base:
The base, in seconds, to increment the initial_backoff by after the
first retry.
:param int max_attempts:
The maximum number of retry attempts.
:param bool retry_to_secondary:
Whether the request should be retried to secondary, if able. This should
only be enabled if RA-GRS accounts are used and potentially stale data
can be handled.
:param int random_jitter_range:
A number in seconds which indicates a range to jitter/randomize for the back-off interval.
For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
'''
self.initial_backoff = initial_backoff
self.increment_base = increment_base
self.random_jitter_range = random_jitter_range
super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
def retry(self, context):
return self._retry(context, self._backoff)
'''
Calculates how long to sleep before retrying.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
def _backoff(self, context):
random_generator = random.Random()
backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count))
random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
random_range_end = backoff + self.random_jitter_range
return random_generator.uniform(random_range_start, random_range_end)
class LinearRetry(_Retry):
'''
Linear retry.
'''
def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3):
'''
Constructs a Linear retry object.
:param int backoff:
The backoff interval, in seconds, between retries.
:param int max_attempts:
The maximum number of retry attempts.
:param bool retry_to_secondary:
Whether the request should be retried to secondary, if able. This should
only be enabled if RA-GRS accounts are used and potentially stale data
can be handled.
:param int random_jitter_range:
A number in seconds which indicates a range to jitter/randomize for the back-off interval.
For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
'''
self.backoff = backoff
self.max_attempts = max_attempts
self.random_jitter_range = random_jitter_range
super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
def retry(self, context):
return self._retry(context, self._backoff)
'''
Calculates how long to sleep before retrying.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
def _backoff(self, context):
random_generator = random.Random()
# the backoff interval normally does not change, however there is the possibility
# that it was modified by accessing the property directly after initializing the object
self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
self.random_range_end = self.backoff + self.random_jitter_range
return random_generator.uniform(self.random_range_start, self.random_range_end)
def no_retry(context):
'''
Specifies never to retry.
:param ~azure.storage.models.RetryContext context:
The retry context.
:return:
Always returns None to indicate never to retry.
:rtype: None
'''
return None
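
For reference while reviewing this removal, the back-off schedule the deleted ExponentialRetry produced is easy to reproduce standalone. The sketch below is plain Python written for this note (not part of the removed module) and mirrors the _backoff formula above: no increment on the first attempt, then initial_backoff + increment_base**count, jittered by +/- random_jitter_range.

import random

def exponential_backoff(count, initial_backoff=15, increment_base=3, random_jitter_range=3):
    # Mirrors ExponentialRetry._backoff: the first retry waits ~initial_backoff seconds,
    # later retries add increment_base**count, all jittered by +/- random_jitter_range.
    backoff = initial_backoff + (0 if count == 0 else pow(increment_base, count))
    lower = backoff - random_jitter_range if backoff > random_jitter_range else 0
    return random.uniform(lower, backoff + random_jitter_range)

for attempt in range(3):
    print(attempt, round(exponential_backoff(attempt), 1))  # roughly 15, 18, 24, each +/- 3s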


@ -1,180 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from datetime import date
from ._common_conversion import (
_sign_string,
_to_str,
)
from ._constants import DEFAULT_X_MS_VERSION
from ._serialization import (
url_quote,
_to_utc_datetime,
)
class SharedAccessSignature(object):
'''
Provides a factory for creating account access
signature tokens with an account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
:param str x_ms_version:
The service version used to generate the shared access signatures.
'''
self.account_name = account_name
self.account_key = account_key
self.x_ms_version = x_ms_version
def generate_account(self, services, resource_types, permission, expiry, start=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_account(services, resource_types)
sas.add_account_signature(self.account_name, self.account_key)
return sas.get_token()
class _QueryStringConstants(object):
SIGNED_SIGNATURE = 'sig'
SIGNED_PERMISSION = 'sp'
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_IDENTIFIER = 'si'
SIGNED_IP = 'sip'
SIGNED_PROTOCOL = 'spr'
SIGNED_VERSION = 'sv'
SIGNED_CACHE_CONTROL = 'rscc'
SIGNED_CONTENT_DISPOSITION = 'rscd'
SIGNED_CONTENT_ENCODING = 'rsce'
SIGNED_CONTENT_LANGUAGE = 'rscl'
SIGNED_CONTENT_TYPE = 'rsct'
START_PK = 'spk'
START_RK = 'srk'
END_PK = 'epk'
END_RK = 'erk'
SIGNED_RESOURCE_TYPES = 'srt'
SIGNED_SERVICES = 'ss'
class _SharedAccessHelper(object):
def __init__(self):
self.query_dict = {}
def _add_query(self, name, val):
if val:
self.query_dict[name] = _to_str(val)
def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
if isinstance(start, date):
start = _to_utc_datetime(start)
if isinstance(expiry, date):
expiry = _to_utc_datetime(expiry)
self._add_query(_QueryStringConstants.SIGNED_START, start)
self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
self._add_query(_QueryStringConstants.SIGNED_IP, ip)
self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version)
def add_resource(self, resource):
self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
def add_id(self, id):
self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
def add_account(self, services, resource_types):
self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
def add_override_response_headers(self, cache_control,
content_disposition,
content_encoding,
content_language,
content_type):
self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
def add_account_signature(self, account_name, account_key):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
string_to_sign = \
(account_name + '\n' +
get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def get_token(self):
return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
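
To document what the removed SharedAccessSignature factory actually emitted, here is a self-contained sketch of the account-SAS token format it produced. The string-to-sign layout comes directly from add_account_signature above; the HMAC-SHA256-over-a-base64-decoded-key step is the standard Azure scheme that the _sign_string helper (not shown in this diff) is assumed to implement, and all parameter values below are illustrative strings rather than the SDK's model objects.

import base64, hashlib, hmac
from urllib.parse import quote

def account_sas(account_name, account_key_b64, sp, ss, srt, st, se, sip='', spr='', sv='2018-03-28'):
    # Each field contributes one newline-terminated slot, present or not, exactly as in
    # _SharedAccessHelper.add_account_signature above.
    string_to_sign = account_name + '\n' + '\n'.join([sp, ss, srt, st, se, sip, spr, sv]) + '\n'
    key = base64.b64decode(account_key_b64)
    sig = base64.b64encode(hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()).decode()
    fields = {'ss': ss, 'srt': srt, 'sp': sp, 'st': st, 'se': se, 'sv': sv, 'sig': sig}
    return '&'.join('{0}={1}'.format(k, quote(v, safe='')) for k, v in fields.items() if v)

print(account_sas('myaccount', base64.b64encode(b'dummy-key').decode(),
                  sp='rl', ss='b', srt='co', st='', se='2025-01-01T00:00:00Z'))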


@ -1,456 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import requests
from abc import ABCMeta
import logging
from time import sleep
import sys
from azure.common import (
AzureException,
AzureHttpError,
)
from ._constants import (
DEFAULT_SOCKET_TIMEOUT,
DEFAULT_X_MS_VERSION,
DEFAULT_USER_AGENT_STRING,
USER_AGENT_STRING_PREFIX,
USER_AGENT_STRING_SUFFIX,
_AUTHORIZATION_HEADER_NAME,
_REDACTED_VALUE,
_COPY_SOURCE_HEADER_NAME,
_CLIENT_REQUEST_ID_HEADER_NAME,
)
from ._error import (
_ERROR_DECRYPTION_FAILURE,
_http_error_handler,
_wrap_exception,
AzureSigningError,
)
from ._http import HTTPError
from ._http.httpclient import _HTTPClient
from ._serialization import (
_update_request,
_add_date_header,
)
from .models import (
RetryContext,
LocationMode,
_OperationContext,
)
from .retry import ExponentialRetry
from io import UnsupportedOperation
from .sharedaccesssignature import _QueryStringConstants
if sys.version_info >= (3,):
from urllib.parse import (
urlparse,
parse_qsl,
urlunparse,
urlencode,
)
else:
from urlparse import (
urlparse,
parse_qsl,
urlunparse,
)
from urllib import urlencode
logger = logging.getLogger(__name__)
class StorageClient(object):
'''
This is the base class for service objects. Service objects are used to do
all requests to Storage. This class cannot be instantiated directly.
:ivar str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:ivar str account_key:
The storage account key. This is used for shared key authentication.
If neither account key nor sas token is specified, anonymous access
will be used.
:ivar str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:ivar str primary_endpoint:
The endpoint to send storage requests to.
:ivar str secondary_endpoint:
The secondary endpoint to read storage data from. This will only be a
valid endpoint if the storage account used is RA-GRS and thus allows
reading from secondary.
:ivar function(context) retry:
A function which determines whether to retry. Takes as a parameter a
:class:`~azure.storage.common.models.RetryContext` object. Returns the number
of seconds to wait before retrying the request, or None to indicate not
to retry.
:ivar ~azure.storage.common.models.LocationMode location_mode:
The host location to use to make requests. Defaults to LocationMode.PRIMARY.
Note that this setting only applies to RA-GRS accounts as other account
types do not allow reading from secondary. If the location_mode is set to
LocationMode.SECONDARY, read requests will be sent to the secondary endpoint.
Write requests will continue to be sent to primary.
:ivar str protocol:
The protocol to use for requests. Defaults to https.
:ivar requests.Session request_session:
The session object to use for http requests.
:ivar function(request) request_callback:
A function called immediately before each request is sent. This function
takes as a parameter the request object and returns nothing. It may be
used to add custom headers or log request data.
:ivar function() response_callback:
A function called immediately after each response is received. This
function takes as a parameter the response object and returns nothing.
It may be used to log response data.
:ivar function() retry_callback:
A function called immediately after retry evaluation is performed. This
function takes as a parameter the retry context object and returns nothing.
It may be used to detect retries and log context information.
'''
__metaclass__ = ABCMeta
def __init__(self, connection_params):
'''
:param obj connection_params: The parameters to use to construct the client.
'''
self.account_name = connection_params.account_name
self.account_key = connection_params.account_key
self.sas_token = connection_params.sas_token
self.token_credential = connection_params.token_credential
self.is_emulated = connection_params.is_emulated
self.primary_endpoint = connection_params.primary_endpoint
self.secondary_endpoint = connection_params.secondary_endpoint
protocol = connection_params.protocol
request_session = connection_params.request_session or requests.Session()
socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
self._httpclient = _HTTPClient(
protocol=protocol,
session=request_session,
timeout=socket_timeout,
)
self.retry = ExponentialRetry().retry
self.location_mode = LocationMode.PRIMARY
self.request_callback = None
self.response_callback = None
self.retry_callback = None
self._X_MS_VERSION = DEFAULT_X_MS_VERSION
self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING
self._is_validating_request_id = True
def _update_user_agent_string(self, service_package_version):
self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX,
service_package_version,
USER_AGENT_STRING_SUFFIX)
@property
def socket_timeout(self):
return self._httpclient.timeout
@socket_timeout.setter
def socket_timeout(self, value):
self._httpclient.timeout = value
@property
def protocol(self):
return self._httpclient.protocol
@protocol.setter
def protocol(self, value):
self._httpclient.protocol = value
@property
def request_session(self):
return self._httpclient.session
@request_session.setter
def request_session(self, value):
self._httpclient.session = value
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
:param str host: Address of the proxy. Ex: '192.168.0.100'
:param int port: Port of the proxy. Ex: 6000
:param str user: User for proxy authorization.
:param str password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
def _get_host_locations(self, primary=True, secondary=False):
locations = {}
if primary:
locations[LocationMode.PRIMARY] = self.primary_endpoint
if secondary:
locations[LocationMode.SECONDARY] = self.secondary_endpoint
return locations
def _apply_host(self, request, operation_context, retry_context):
if operation_context.location_lock and operation_context.host_location:
# If this is a location locked operation and the location is set,
# override the request location and host_location.
request.host_locations = operation_context.host_location
request.host = list(operation_context.host_location.values())[0]
retry_context.location_mode = list(operation_context.host_location.keys())[0]
elif len(request.host_locations) == 1:
# If only one location is allowed, use that location.
request.host = list(request.host_locations.values())[0]
retry_context.location_mode = list(request.host_locations.keys())[0]
else:
# If multiple locations are possible, choose based on the location mode.
request.host = request.host_locations.get(self.location_mode)
retry_context.location_mode = self.location_mode
@staticmethod
def extract_date_and_request_id(retry_context):
if getattr(retry_context, 'response', None) is None:
return ""
resp = retry_context.response
if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
resp.headers['date'], resp.headers['x-ms-request-id'])
elif 'date' in resp.headers:
return str.format("Server-Timestamp={0}", resp.headers['date'])
elif 'x-ms-request-id' in resp.headers:
return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
else:
return ""
@staticmethod
def _scrub_headers(headers):
# make a copy to avoid contaminating the request
clean_headers = headers.copy()
if _AUTHORIZATION_HEADER_NAME in clean_headers:
clean_headers[_AUTHORIZATION_HEADER_NAME] = _REDACTED_VALUE
# in case of copy operations, there could be a SAS signature present in the header value
if _COPY_SOURCE_HEADER_NAME in clean_headers \
and _QueryStringConstants.SIGNED_SIGNATURE + "=" in clean_headers[_COPY_SOURCE_HEADER_NAME]:
# take the url apart and scrub away the signed signature
scheme, netloc, path, params, query, fragment = urlparse(clean_headers[_COPY_SOURCE_HEADER_NAME])
parsed_qs = dict(parse_qsl(query))
parsed_qs[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE
# the SAS needs to be put back together
clean_headers[_COPY_SOURCE_HEADER_NAME] = urlunparse(
(scheme, netloc, path, params, urlencode(parsed_qs), fragment))
return clean_headers
@staticmethod
def _scrub_query_parameters(query):
# make a copy to avoid contaminating the request
clean_queries = query.copy()
if _QueryStringConstants.SIGNED_SIGNATURE in clean_queries:
clean_queries[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE
return clean_queries
@staticmethod
def _validate_echoed_client_request_id(request, response):
# raise exception if the echoed client request id from the service is not identical to the one we sent
if _CLIENT_REQUEST_ID_HEADER_NAME in response.headers and \
request.headers[_CLIENT_REQUEST_ID_HEADER_NAME] != response.headers[_CLIENT_REQUEST_ID_HEADER_NAME]:
raise AzureException(
"Echoed client request ID: {} does not match sent client request ID: {}. Service request ID: {}".format(
response.headers[_CLIENT_REQUEST_ID_HEADER_NAME], request.headers[_CLIENT_REQUEST_ID_HEADER_NAME],
response.headers['x-ms-request-id']))
def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None):
'''
Sends the request and return response. Catches HTTPError and hands it
to error handler
'''
operation_context = operation_context or _OperationContext()
retry_context = RetryContext()
retry_context.is_emulated = self.is_emulated
# if request body is a stream, we need to remember its current position in case retries happen
if hasattr(request.body, 'read'):
try:
retry_context.body_position = request.body.tell()
except (AttributeError, UnsupportedOperation):
# if body position cannot be obtained, then retries will not work
pass
# Apply the appropriate host based on the location mode
self._apply_host(request, operation_context, retry_context)
# Apply common settings to the request
_update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers[_CLIENT_REQUEST_ID_HEADER_NAME])
while True:
try:
try:
# Execute the request callback
if self.request_callback:
self.request_callback(request)
# Add date and auth after the callback so date doesn't get too old and
# authentication is still correct if signed headers are added in the request
# callback. This also ensures retry policies with long back offs
# will work as it resets the time sensitive headers.
_add_date_header(request)
try:
# request can be signed individually
self.authentication.sign_request(request)
except AttributeError:
# session can also be signed
self.request_session = self.authentication.signed_session(self.request_session)
# Set the request context
retry_context.request = request
# Log the request before it goes out
# Avoid unnecessary scrubbing if the logger is not on
if logger.isEnabledFor(logging.INFO):
logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
client_request_id_prefix,
request.method,
request.path,
self._scrub_query_parameters(request.query),
str(self._scrub_headers(request.headers)).replace('\n', ''))
# Perform the request
response = self._httpclient.perform_request(request)
# Execute the response callback
if self.response_callback:
self.response_callback(response)
# Validate the client request ID
if self._is_validating_request_id:
self._validate_echoed_client_request_id(request, response)
# Set the response context
retry_context.response = response
# Log the response when it comes back
logger.info("%s Receiving Response: "
"%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
client_request_id_prefix,
self.extract_date_and_request_id(retry_context),
response.status,
response.message,
str(response.headers).replace('\n', ''))
# Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
if response.status >= 300:
# This exception will be caught by the general error handler
# and raised as an azure http exception
_http_error_handler(
HTTPError(response.status, response.message, response.headers, response.body))
# Parse the response
if parser:
if parser_args:
args = [response]
args.extend(parser_args)
return parser(*args)
else:
return parser(response)
else:
return
except AzureException as ex:
retry_context.exception = ex
raise ex
except Exception as ex:
retry_context.exception = ex
raise _wrap_exception(ex, AzureException)
except AzureException as ex:
# only parse the strings used for logging if logging is at least enabled for CRITICAL
exception_str_in_one_line = ''
status_code = ''
timestamp_and_request_id = ''
if logger.isEnabledFor(logging.CRITICAL):
exception_str_in_one_line = str(ex).replace('\n', '')
status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
# if the http error was expected, we should short-circuit
if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors:
logger.info("%s Received expected http error: "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
elif isinstance(ex, AzureSigningError):
logger.info("%s Unable to sign the request: Exception=%s.",
client_request_id_prefix,
exception_str_in_one_line)
raise ex
logger.info("%s Operation failed: checking if the operation should be retried. "
"Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
retry_context.count if hasattr(retry_context, 'count') else 0,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
# Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
# will not be resolved with retries.
if str(ex) == _ERROR_DECRYPTION_FAILURE:
logger.error("%s Encountered decryption failure: this cannot be retried. "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
# Determine whether a retry should be performed and if so, how
# long to wait before performing retry.
retry_interval = self.retry(retry_context)
if retry_interval is not None:
# Execute the callback
if self.retry_callback:
self.retry_callback(retry_context)
logger.info(
"%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
client_request_id_prefix,
retry_context.count,
retry_interval)
# Sleep for the desired retry interval
sleep(retry_interval)
else:
logger.error("%s Retry policy did not allow for a retry: "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
finally:
# If this is a location locked operation and the location is not set,
# this is the first request of that operation. Set the location to
# be used for subsequent requests in the operation.
if operation_context.location_lock and not operation_context.host_location:
# note: to cover the emulator scenario, the host_location is grabbed
# from request.host_locations(which includes the dev account name)
# instead of request.host(which at this point no longer includes the dev account name)
operation_context.host_location = {
retry_context.location_mode: request.host_locations[retry_context.location_mode]}
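
The request_callback / response_callback / retry_callback hooks documented on StorageClient above were the main extension points this vendored base class offered. A minimal sketch of how a caller could have wired them up follows; the concrete client name and constructor reflect the historical azure.storage.blob surface and are shown as an assumption, since the vendored copy is exactly what this commit deletes.

import logging

logger = logging.getLogger('storage.telemetry')

def log_outgoing(request):
    # request_callback: invoked just before the request is dated, signed and sent.
    logger.debug('outgoing %s %s', request.method, request.path)

def log_retry(retry_context):
    # retry_callback: invoked after retry evaluation decides a retry will happen.
    logger.warning('retrying request, attempt %s', getattr(retry_context, 'count', 0))

# Assumed historical usage (BlockBlobService subclassed StorageClient):
# service = BlockBlobService(account_name='myaccount', account_key='<key>')
# service.request_callback = log_outgoing
# service.retry_callback = log_retry
# service.retry = ExponentialRetry(initial_backoff=2, max_attempts=5).retry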


@ -1,48 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import requests
class TokenCredential(object):
"""
Represents a token credential that is used to authorize HTTPS requests.
The token can be updated by the user.
:ivar str token:
The authorization token. It can be set by the user at any point in a thread-safe way.
"""
def __init__(self, initial_value=None):
"""
:param initial_value: initial value for the token.
"""
self.token = initial_value
def signed_session(self, session=None):
"""
Sign requests session with the token. This method is called every time a request is going on the wire.
The user is responsible for updating the token with the preferred tool/SDK.
In general there are two options:
- override this method to update the token in a preferred way and set Authorization header on session
- not override this method, and have a timer that triggers periodically to update the token on this class
The second option is recommended as it tends to be more performance-friendly.
:param session: The session to configure for authentication
:type session: requests.Session
:rtype: requests.Session
"""
session = session or requests.Session()
session.headers['Authorization'] = "Bearer {}".format(self.token)
return session
def token(self, new_value):
"""
:param new_value: new value to be set as the token.
"""
self.token = new_value
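
The docstring above recommends refreshing the token from a periodic timer rather than overriding signed_session. A small sketch of that pattern, written for this note: fetch_new_token stands in for whatever identity library the caller uses and is not part of the removed code.

import threading

def keep_token_fresh(credential, fetch_new_token, interval_seconds=300):
    # Periodically swap in a new bearer token; plain attribute assignment is the
    # thread-safe update path the TokenCredential docstring describes.
    def refresh():
        credential.token = fetch_new_token()
        timer = threading.Timer(interval_seconds, refresh)
        timer.daemon = True
        timer.start()
    refresh()

# credential = TokenCredential(fetch_new_token())
# keep_token_fresh(credential, fetch_new_token)
# The service client would then be constructed with token_credential=credential.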


@ -1,23 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
SMBProperties,
NTFSAttributes,
)
from ._constants import __version__


@ -1,11 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '2.1.0'
# x-ms-version for storage service.
X_MS_VERSION = '2019-02-02'


@ -1,326 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from dateutil import parser
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from .models import (
Share,
Directory,
File,
Handle,
FileProperties,
FileRange,
ShareProperties,
DirectoryProperties,
)
from azure.storage.common.models import (
_list,
)
from azure.storage.common._deserialization import (
_parse_properties,
_parse_metadata,
)
from azure.storage.common._error import _validate_content_match
from azure.storage.common._common_conversion import (
_get_content_md5,
_to_str,
)
def _parse_snapshot_share(response, name):
'''
Extracts snapshot return header.
'''
snapshot = response.headers.get('x-ms-snapshot')
return _parse_share(response, name, snapshot)
def _parse_share(response, name, snapshot=None):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, ShareProperties)
return Share(name, props, metadata, snapshot)
def _parse_directory(response, name):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, DirectoryProperties)
return Directory(name, props, metadata)
def _parse_permission_key(response):
'''
Extracts out file permission key
'''
if response is None or response.headers is None:
return None
return response.headers.get('x-ms-file-permission-key', None)
def _parse_permission(response):
'''
Extracts out file permission
'''
return response.body
def _parse_file(response, name, validate_content=False):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, FileProperties)
# For range gets, only look at 'x-ms-content-md5' for overall MD5
content_settings = getattr(props, 'content_settings')
if 'content-range' in response.headers:
if 'x-ms-content-md5' in response.headers:
setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5']))
else:
delattr(content_settings, 'content_md5')
if validate_content:
computed_md5 = _get_content_md5(response.body)
_validate_content_match(response.headers['content-md5'], computed_md5)
return File(name, response.body, props, metadata)
def _convert_xml_to_shares(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults AccountName="https://myaccount.file.core.windows.net">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Shares>
<Share>
<Name>share-name</Name>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date/time-value</Last-Modified>
<Etag>etag</Etag>
<Quota>max-share-size</Quota>
</Properties>
<Metadata>
<metadata-name>value</metadata-name>
</Metadata>
</Share>
</Shares>
<NextMarker>marker-value</NextMarker>
</EnumerationResults>
'''
if response is None or response.body is None:
return None
shares = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(shares, 'next_marker', next_marker)
shares_element = list_element.find('Shares')
for share_element in shares_element.findall('Share'):
# Name element
share = Share()
share.name = share_element.findtext('Name')
# Snapshot
share.snapshot = share_element.findtext('Snapshot')
# Metadata
metadata_root_element = share_element.find('Metadata')
if metadata_root_element is not None:
share.metadata = dict()
for metadata_element in metadata_root_element:
share.metadata[metadata_element.tag] = metadata_element.text
# Properties
properties_element = share_element.find('Properties')
share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
share.properties.etag = properties_element.findtext('Etag')
share.properties.quota = int(properties_element.findtext('Quota'))
# Add share to list
shares.append(share)
return shares
def _convert_xml_to_directories_and_files(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Entries>
<File>
<Name>file-name</Name>
<Properties>
<Content-Length>size-in-bytes</Content-Length>
</Properties>
</File>
<Directory>
<Name>directory-name</Name>
</Directory>
</Entries>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
entries = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(entries, 'next_marker', next_marker)
entries_element = list_element.find('Entries')
for file_element in entries_element.findall('File'):
# Name element
file = File()
file.name = file_element.findtext('Name')
# Properties
properties_element = file_element.find('Properties')
file.properties.content_length = int(properties_element.findtext('Content-Length'))
# Add file to list
entries.append(file)
for directory_element in entries_element.findall('Directory'):
# Name element
directory = Directory()
directory.name = directory_element.findtext('Name')
# Add directory to list
entries.append(directory)
return entries
def _convert_xml_to_handles(response):
"""
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults>
<Entries>
<Handle>
<HandleId>21123954401</HandleId>
<Path />
<FileId>0</FileId>
<ParentId>0</ParentId>
<SessionId>9385737614310506553</SessionId>
<ClientIp>167.220.2.92:27553</ClientIp>
<OpenTime>Fri, 03 May 2019 05:59:43 GMT</OpenTime>
</Handle>
...
</Entries>
<NextMarker />
</EnumerationResults>'
"""
if response is None or response.body is None:
return None
entries = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(entries, 'next_marker', next_marker)
handles_list_element = list_element.find('Entries')
for handle_element in handles_list_element.findall('Handle'):
# Name element
handle = Handle()
handle.handle_id = handle_element.findtext('HandleId')
handle.path = handle_element.findtext('Path')
handle.file_id = handle_element.findtext('FileId')
handle.parent_id = handle_element.findtext('ParentId')
handle.session_id = handle_element.findtext('SessionId')
handle.client_ip = handle_element.findtext('ClientIp')
handle.open_time = parser.parse(handle_element.findtext('OpenTime'))
last_connect_time_string = handle_element.findtext('LastReconnectTime')
if last_connect_time_string is not None:
handle.last_reconnect_time = parser.parse(last_connect_time_string)
# Add file to list
entries.append(handle)
return entries
def _parse_close_handle_response(response):
if response is None or response.body is None:
return 0
results = _list()
results.append(int(response.headers['x-ms-number-of-handles-closed']))
next_marker = None if 'x-ms-marker' not in response.headers else response.headers['x-ms-marker']
setattr(results, 'next_marker', next_marker)
return results
def _convert_xml_to_ranges(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<Ranges>
<Range>
<Start>Start Byte</Start>
<End>End Byte</End>
</Range>
<Range>
<Start>Start Byte</Start>
<End>End Byte</End>
</Range>
</Ranges>
'''
if response is None or response.body is None:
return None
ranges = list()
ranges_element = ETree.fromstring(response.body)
for range_element in ranges_element.findall('Range'):
# Parse range
range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
# Add range to list
ranges.append(range)
return ranges
def _convert_xml_to_share_stats(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<ShareStats>
<ShareUsageBytes>15</ShareUsageBytes>
</ShareStats>
'''
if response is None or response.body is None:
return None
share_stats_element = ETree.fromstring(response.body)
return int(share_stats_element.findtext('ShareUsageBytes'))


@ -1,159 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import threading
def _download_file_chunks(file_service, share_name, directory_name, file_name,
download_size, block_size, progress, start_range, end_range,
stream, max_connections, progress_callback, validate_content,
timeout, operation_context, snapshot):
downloader_class = _ParallelFileChunkDownloader if max_connections > 1 else _SequentialFileChunkDownloader
downloader = downloader_class(
file_service,
share_name,
directory_name,
file_name,
download_size,
block_size,
progress,
start_range,
end_range,
stream,
progress_callback,
validate_content,
timeout,
operation_context,
snapshot,
)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
else:
for chunk in downloader.get_chunk_offsets():
downloader.process_chunk(chunk)
class _FileChunkDownloader(object):
def __init__(self, file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout, operation_context, snapshot):
# identifiers for the file
self.file_service = file_service
self.share_name = share_name
self.directory_name = directory_name
self.file_name = file_name
# information on the download range/chunk size
self.chunk_size = chunk_size
self.download_size = download_size
self.start_index = start_range
self.file_end = end_range
# the destination that we will write to
self.stream = stream
# progress related
self.progress_callback = progress_callback
self.progress_total = progress
# parameters for each get file operation
self.validate_content = validate_content
self.timeout = timeout
self.operation_context = operation_context
self.snapshot = snapshot
def get_chunk_offsets(self):
index = self.start_index
while index < self.file_end:
yield index
index += self.chunk_size
def process_chunk(self, chunk_start):
if chunk_start + self.chunk_size > self.file_end:
chunk_end = self.file_end
else:
chunk_end = chunk_start + self.chunk_size
chunk_data = self._download_chunk(chunk_start, chunk_end).content
length = chunk_end - chunk_start
if length > 0:
self._write_to_stream(chunk_data, chunk_start)
self._update_progress(length)
# should be provided by the subclass
def _update_progress(self, length):
pass
# should be provided by the subclass
def _write_to_stream(self, chunk_data, chunk_start):
pass
def _download_chunk(self, chunk_start, chunk_end):
return self.file_service._get_file(
self.share_name,
self.directory_name,
self.file_name,
start_range=chunk_start,
end_range=chunk_end - 1,
validate_content=self.validate_content,
timeout=self.timeout,
_context=self.operation_context,
snapshot=self.snapshot
)
class _ParallelFileChunkDownloader(_FileChunkDownloader):
def __init__(self, file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout, operation_context, snapshot):
super(_ParallelFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout,
operation_context, snapshot)
# for a parallel download, the stream is always seekable, so we note down the current position
# in order to seek to the right place when out-of-order chunks come in
self.stream_start = stream.tell()
# since parallel operations are going on
# it is essential to protect the writing and progress reporting operations
self.stream_lock = threading.Lock()
self.progress_lock = threading.Lock()
def _update_progress(self, length):
if self.progress_callback is not None:
with self.progress_lock:
self.progress_total += length
total_so_far = self.progress_total
self.progress_callback(total_so_far, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
with self.stream_lock:
self.stream.seek(self.stream_start + (chunk_start - self.start_index))
self.stream.write(chunk_data)
class _SequentialFileChunkDownloader(_FileChunkDownloader):
def __init__(self, file_service, share_name, directory_name, file_name, download_size, chunk_size, progress,
start_range, end_range, stream, progress_callback, validate_content, timeout, operation_context,
snapshot):
super(_SequentialFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range,
end_range, stream, progress_callback, validate_content,
timeout, operation_context, snapshot)
def _update_progress(self, length):
if self.progress_callback is not None:
self.progress_total += length
self.progress_callback(self.progress_total, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
# chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
self.stream.write(chunk_data)
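
The chunking arithmetic shared by both downloaders above is small enough to show on its own. This standalone sketch mirrors get_chunk_offsets/process_chunk: start offsets advance by chunk_size and the final chunk is clamped to the end of the requested range (the actual GET then uses end_range = chunk_end - 1, since service ranges are inclusive).

def chunk_ranges(start, end, chunk_size):
    # Yields (chunk_start, chunk_end) pairs exactly as the downloader iterates them.
    index = start
    while index < end:
        yield index, min(index + chunk_size, end)
        index += chunk_size

print(list(chunk_ranges(0, 10 * 1024 * 1024, 4 * 1024 * 1024)))
# [(0, 4194304), (4194304, 8388608), (8388608, 10485760)]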


@ -1,95 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from sys import getsizeof
from azure.storage.common._common_conversion import _str
from azure.storage.common._error import (
_validate_not_none,
_ERROR_START_END_NEEDED_FOR_MD5,
_ERROR_RANGE_TOO_LARGE_FOR_MD5,
)
_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time'
_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else ' \
'please use file_permission_key'
def _get_path(share_name=None, directory_name=None, file_name=None):
'''
Creates the path to access a file resource.
share_name:
Name of share.
directory_name:
The path to the directory.
file_name:
Name of file.
'''
if share_name and directory_name and file_name:
return '/{0}/{1}/{2}'.format(
_str(share_name),
_str(directory_name),
_str(file_name))
elif share_name and directory_name:
return '/{0}/{1}'.format(
_str(share_name),
_str(directory_name))
elif share_name and file_name:
return '/{0}/{1}'.format(
_str(share_name),
_str(file_name))
elif share_name:
return '/{0}'.format(_str(share_name))
else:
return '/'
def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
end_range_required=True, check_content_md5=False, is_source=False):
# If end range is provided, start range must be provided
if start_range_required or end_range is not None:
_validate_not_none('start_range', start_range)
if end_range_required:
_validate_not_none('end_range', end_range)
# Format based on whether end_range is present
request.headers = request.headers or {}
header_name = 'x-ms-source-range' if is_source else 'x-ms-range'
if end_range is not None:
request.headers[header_name] = 'bytes={0}-{1}'.format(start_range, end_range)
elif start_range is not None:
request.headers[header_name] = 'bytes={0}-'.format(start_range)
# Content MD5 can only be provided for a complete range less than 4MB in size
if check_content_md5:
if start_range is None or end_range is None:
raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
if end_range - start_range > 4 * 1024 * 1024:
raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
request.headers['x-ms-range-get-content-md5'] = 'true'
def _validate_and_return_file_permission(file_permission, file_permission_key, default_permission):
# if file_permission and file_permission_key are both empty, then use the default_permission
# value as file permission, file_permission size should be <= 8KB, else file permission_key should be used
empty_file_permission = file_permission is None or len(file_permission) == 0
empty_file_permission_key = file_permission_key is None or len(file_permission_key) == 0
file_permission_size_too_big = False if file_permission is None \
else len(str(file_permission).encode('utf-8')) > 8 * 1024
if file_permission_size_too_big:
raise ValueError(_FILE_PERMISSION_TOO_LONG)
if empty_file_permission:
if empty_file_permission_key:
return default_permission
else:
return None
if empty_file_permission_key:
return file_permission
raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS)
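
For the range helper above, the header shapes are worth recording since they differ between bounded and open-ended requests. This standalone sketch reproduces only the formatting branch of _validate_and_format_range_headers (the validation and MD5 guard are omitted):

def format_range_header(start_range=None, end_range=None, is_source=False):
    # Inclusive byte ranges; open-ended form when only the start offset is given.
    header_name = 'x-ms-source-range' if is_source else 'x-ms-range'
    if end_range is not None:
        return {header_name: 'bytes={0}-{1}'.format(start_range, end_range)}
    if start_range is not None:
        return {header_name: 'bytes={0}-'.format(start_range)}
    return {}

print(format_range_header(0, 4 * 1024 * 1024 - 1))  # {'x-ms-range': 'bytes=0-4194303'}
print(format_range_header(1024))                     # {'x-ms-range': 'bytes=1024-'}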


@ -1,133 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import threading
def _upload_file_chunks(file_service, share_name, directory_name, file_name,
file_size, block_size, stream, max_connections,
progress_callback, validate_content, timeout):
uploader = _FileChunkUploader(
file_service,
share_name,
directory_name,
file_name,
file_size,
block_size,
stream,
max_connections > 1,
progress_callback,
validate_content,
timeout
)
if progress_callback is not None:
progress_callback(0, file_size)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
else:
if file_size is not None:
range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
else:
range_ids = uploader.process_all_unknown_size()
return range_ids
class _FileChunkUploader(object):
def __init__(self, file_service, share_name, directory_name, file_name,
file_size, chunk_size, stream, parallel, progress_callback,
validate_content, timeout):
self.file_service = file_service
self.share_name = share_name
self.directory_name = directory_name
self.file_name = file_name
self.file_size = file_size
self.chunk_size = chunk_size
self.stream = stream
self.stream_start = stream.tell() if parallel else None
self.stream_lock = threading.Lock() if parallel else None
self.progress_callback = progress_callback
self.progress_total = 0
self.progress_lock = threading.Lock() if parallel else None
self.validate_content = validate_content
self.timeout = timeout
def get_chunk_offsets(self):
index = 0
if self.file_size is None:
# we don't know the size of the stream, so we have no
# choice but to seek
while True:
data = self._read_from_stream(index, 1)
if not data:
break
yield index
index += self.chunk_size
else:
while index < self.file_size:
yield index
index += self.chunk_size
def process_chunk(self, chunk_offset):
size = self.chunk_size
if self.file_size is not None:
size = min(size, self.file_size - chunk_offset)
chunk_data = self._read_from_stream(chunk_offset, size)
return self._upload_chunk_with_progress(chunk_offset, chunk_data)
def process_all_unknown_size(self):
assert self.stream_lock is None
range_ids = []
index = 0
while True:
data = self._read_from_stream(None, self.chunk_size)
if data:
index += len(data)
range_id = self._upload_chunk_with_progress(index, data)
range_ids.append(range_id)
else:
break
return range_ids
def _read_from_stream(self, offset, count):
if self.stream_lock is not None:
with self.stream_lock:
self.stream.seek(self.stream_start + offset)
data = self.stream.read(count)
else:
data = self.stream.read(count)
return data
def _update_progress(self, length):
if self.progress_callback is not None:
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
total = self.progress_total
else:
self.progress_total += length
total = self.progress_total
self.progress_callback(total, self.file_size)
def _upload_chunk_with_progress(self, chunk_start, chunk_data):
chunk_end = chunk_start + len(chunk_data) - 1
self.file_service.update_range(
self.share_name,
self.directory_name,
self.file_name,
chunk_data,
chunk_start,
chunk_end,
self.validate_content,
timeout=self.timeout
)
range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end)
self._update_progress(len(chunk_data))
return range_id
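
On the upload side, each chunk becomes one update_range call over an inclusive byte range and is reported back as a 'bytes=<start>-<end>' id. A standalone sketch of that mapping for a stream of known size (the unknown-size path instead reads until the stream is exhausted):

def upload_range_ids(file_size, chunk_size):
    # Mirrors process_chunk/_upload_chunk_with_progress for a known file size.
    ids, offset = [], 0
    while offset < file_size:
        length = min(chunk_size, file_size - offset)
        ids.append('bytes={0}-{1}'.format(offset, offset + length - 1))
        offset += chunk_size
    return ids

print(upload_range_ids(10 * 1024 * 1024, 4 * 1024 * 1024))
# ['bytes=0-4194303', 'bytes=4194304-8388607', 'bytes=8388608-10485759']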

The diff for this file is not shown because of its large size.


@ -1,593 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.storage.common._common_conversion import _to_str
class Share(object):
'''
File share class.
:ivar str name:
The name of the share.
:ivar ShareProperties properties:
System properties for the share.
:ivar metadata:
A dict containing name-value pairs associated with the share as metadata.
This var is set to None unless the include=metadata param was included
for the list shares operation. If this parameter was specified but the
share has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
:ivar str snapshot:
A DateTime value that uniquely identifies the snapshot. The value of
this header indicates the snapshot version, and may be used in
subsequent requests to access the snapshot.
'''
def __init__(self, name=None, props=None, metadata=None, snapshot=None):
self.name = name
self.properties = props or ShareProperties()
self.metadata = metadata
self.snapshot = snapshot
class ShareProperties(object):
'''
File share's properties class.
:ivar datetime last_modified:
A datetime object representing the last time the share was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int quota:
Returns the current share quota in GB.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.quota = None
class Directory(object):
'''
Directory class.
:ivar str name:
The name of the directory.
:ivar DirectoryProperties properties:
System properties for the directory.
:ivar metadata:
A dict containing name-value pairs associated with the directory as metadata.
This var is set to None unless the include=metadata param was included
for the list directory operation. If this parameter was specified but the
directory has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
'''
def __init__(self, name=None, props=None, metadata=None):
self.name = name
self.properties = props or DirectoryProperties()
self.metadata = metadata
class DirectoryProperties(object):
'''
File directory's properties class.
:ivar datetime last_modified:
A datetime object representing the last time the directory was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar bool server_encrypted:
Set to true if the directory metadata is encrypted on the server.
:ivar ~azure.storage.file.models.SMBProperties smb_properties:
SMB related file properties
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.server_encrypted = None
self.smb_properties = SMBProperties()
class File(object):
'''
File class.
:ivar str name:
The name of the file.
:ivar content:
File content.
:vartype content: str or bytes
:ivar FileProperties properties:
System properties for the file.
:ivar metadata:
A dict containing name-value pairs associated with the file as metadata.
This var is set to None unless the include=metadata param was included
for the list file operation. If this parameter was specified but the
file has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
'''
def __init__(self, name=None, content=None, props=None, metadata=None):
self.name = name
self.content = content
self.properties = props or FileProperties()
self.metadata = metadata
class FileProperties(object):
'''
File Properties.
:ivar datetime last_modified:
A datetime object representing the last time the file was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int content_length:
The length of the content returned. If the entire blob was requested,
the length of blob in bytes. If a subset of the blob was requested, the
length of the returned subset.
:ivar str content_range:
Indicates the range of bytes returned in the event that the client
requested a subset of the blob.
:ivar ~azure.storage.file.models.ContentSettings content_settings:
Stores all the content settings for the file.
:ivar ~azure.storage.file.models.CopyProperties copy:
Stores all the copy properties for the file.
:ivar bool server_encrypted:
Set to true if the file data and application metadata are completely encrypted.
:ivar ~azure.storage.file.models.SMBProperties smb_properties:
SMB related file properties
:ivar ~azure.storage.file.models.LeaseProperties lease:
Stores all the lease information for the file.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.content_length = None
self.content_range = None
self.content_settings = ContentSettings()
self.copy = CopyProperties()
self.server_encrypted = None
self.smb_properties = SMBProperties()
self.lease = LeaseProperties()
class SMBProperties(object):
"""
SMB related properties to get/set for file/directory
:ivar str or :class:`~azure.storage.file.models.NTFSAttributes` ntfs_attributes:
The file system attributes for files and directories.
If not set, indicates preservation of existing values.
Here is an example for when the var type is str: 'Temporary|Archive'
:ivar str or datetime creation_time:
When the File or Directory was created.
If it is a string type, time should have 7 decimal digits, eg. '2019-07-07T02:52:46.5540162Z'
:ivar str or datetime last_write_time:
When the File or Directory was last modified. eg. '2019-07-07T02:52:46.5540162Z'
If it is a string type, time should have 7 decimal digits, eg. '2019-07-07T02:52:46.5540162Z'
:ivar str permission_key:
The file's File Permission Key
:ivar str change_time:
When the File was last changed. This is returned by the service; users don't need to specify it.
:ivar str file_id:
The Id of this file or directory. This is returned by the service; users don't need to specify it.
:ivar str parent_id:
The Id of this directory's parent. This is returned by the service; users don't need to specify it.
"""
def __init__(self, ntfs_attributes=None, creation_time=None, last_write_time=None, permission_key=None):
self.ntfs_attributes = ntfs_attributes
self.creation_time = creation_time
self.last_write_time = last_write_time
self.permission_key = permission_key
self.change_time = None
self.file_id = None
self.parent_id = None
def _to_request_headers(self):
creation_time = self.creation_time if isinstance(self.creation_time, str) \
else self.creation_time.isoformat() + '0Z'
last_write_time = self.last_write_time if isinstance(self.last_write_time, str) \
else self.last_write_time.isoformat() + '0Z'
return {
'x-ms-file-attributes': _to_str(self.ntfs_attributes),
'x-ms-file-creation-time': creation_time,
'x-ms-file-last-write-time': last_write_time,
'x-ms-file-permission-key': _to_str(self.permission_key)
}
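For orientation only (this snippet is not part of the removed module), a sketch of the request headers _to_request_headers builds, assuming the SMBProperties class above and made-up values:
from datetime import datetime
# Placeholder values for illustration only.
props = SMBProperties(ntfs_attributes='Archive',
                      creation_time=datetime(2019, 7, 7, 2, 52, 46, 554016),
                      last_write_time='2019-07-07T02:52:46.5540162Z',
                      permission_key='example-permission-key')
headers = props._to_request_headers()
# headers == {'x-ms-file-attributes': 'Archive',
#             'x-ms-file-creation-time': '2019-07-07T02:52:46.5540160Z',
#             'x-ms-file-last-write-time': '2019-07-07T02:52:46.5540162Z',
#             'x-ms-file-permission-key': 'example-permission-key'}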
class LeaseProperties(object):
'''
File Lease Properties.
:ivar str status:
The lease status of the file.
Possible values: locked|unlocked
:ivar str state:
Lease state of the file.
Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a file is leased, specifies whether the lease is of infinite or fixed duration.
'''
def __init__(self):
self.status = None
self.state = None
self.duration = None
class Handle(object):
"""
Represents a file handle.
:ivar str handle_id:
Used to identify handle.
:ivar str path:
Used to identify the name of the object for which the handle is open.
:ivar str file_id:
Uniquely identifies the file.
This is useful when renames are happening as the file ID does not change.
:ivar str parent_id:
Uniquely identifies the parent directory.
This is useful when renames are happening as the parent ID does not change.
:ivar str session_id:
Session ID in context of which the file handle was opened.
:ivar str client_ip:
Used to identify client that has opened the handle.
The field is included only if client IP is known by the service.
:ivar datetime open_time:
Used to decide if handle may have been leaked.
:ivar datetime last_reconnect_time:
Used to decide if handle was reopened after client/server disconnect due to networking or other faults.
The field is included only if disconnect event occurred and handle was reopened.
"""
def __init__(self, handle_id=None, path=None, file_id=None, parent_id=None, session_id=None,
client_ip=None, open_time=None, last_reconnect_time=None):
self.handle_id = handle_id
self.path = path
self.file_id = file_id
self.parent_id = parent_id
self.session_id = session_id
self.client_ip = client_ip
self.open_time = open_time
self.last_reconnect_time = last_reconnect_time
class ContentSettings(object):
'''
Used to store the content settings of a file.
:ivar str content_type:
The content type specified for the file. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If content_encoding has previously been set
for the file, that value is stored.
:ivar str content_language:
If content_language has previously been set
for the file, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:ivar str cache_control:
If cache_control has previously been set for
the file, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
'''
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None):
self.content_type = content_type
self.content_encoding = content_encoding
self.content_language = content_language
self.content_disposition = content_disposition
self.cache_control = cache_control
self.content_md5 = content_md5
def _to_headers(self):
return {
'x-ms-cache-control': _to_str(self.cache_control),
'x-ms-content-type': _to_str(self.content_type),
'x-ms-content-disposition': _to_str(self.content_disposition),
'x-ms-content-md5': _to_str(self.content_md5),
'x-ms-content-encoding': _to_str(self.content_encoding),
'x-ms-content-language': _to_str(self.content_language),
}
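A quick sketch (not part of the removed module) of how ContentSettings maps to request headers; the values here are made up:
settings = ContentSettings(content_type='text/plain', cache_control='no-cache')
headers = settings._to_headers()
# e.g. headers['x-ms-content-type'] == 'text/plain'
#      headers['x-ms-cache-control'] == 'no-cache'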
class CopyProperties(object):
'''
File Copy Properties.
:ivar str id:
String identifier for the last attempted Copy File operation where this file
was the destination file. This header does not appear if this file has never
been the destination in a Copy File operation, or if this file has been
modified after a concluded Copy File operation using Set File Properties or
Put File.
:ivar str source:
URL up to 2 KB in length that specifies the source file used in the last attempted
Copy File operation where this file was the destination file. This header does not
appear if this file has never been the destination in a Copy File operation, or if
this file has been modified after a concluded Copy File operation using
Set File Properties or Put File.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don't cause failure.
aborted:
Copy was ended by Abort Copy File.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy File operation where this file was the destination file. Can show
between 0 and Content-Length bytes copied.
:ivar datetime completion_time:
Conclusion time of the last attempted Copy File operation where this file was the
destination file. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
'''
def __init__(self):
self.id = None
self.source = None
self.status = None
self.progress = None
self.completion_time = None
self.status_description = None
class FileRange(object):
'''
File Range.
:ivar int start:
Byte index for start of file range.
:ivar int end:
Byte index for end of file range.
'''
def __init__(self, start=None, end=None):
self.start = start
self.end = end
class DeleteSnapshot(object):
'''
Required if the Share has associated snapshots. Specifies how to handle the snapshots.
'''
Include = 'include'
'''
Delete the share and all of its snapshots.
'''
class FilePermissions(object):
'''
FilePermissions class to be used with
:func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API.
:ivar FilePermissions FilePermissions.CREATE:
Create a new file or copy a file to a new file.
:ivar FilePermissions FilePermissions.DELETE:
Delete the file.
:ivar FilePermissions FilePermissions.READ:
Read the content, properties, metadata. Use the file as the source of a copy
operation.
:ivar FilePermissions FilePermissions.WRITE:
Create or write content, properties, metadata. Resize the file. Use the file
as the destination of a copy operation within the same account.
'''
def __init__(self, read=False, create=False, write=False, delete=False,
_str=None):
'''
:param bool read:
Read the content, properties, metadata. Use the file as the source of a copy
operation.
:param bool create:
Create a new file or copy a file to a new file.
:param bool write:
Create or write content, properties, metadata. Resize the file. Use the file
as the destination of a copy operation within the same account.
:param bool delete:
Delete the file.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.create = create or ('c' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
def __or__(self, other):
return FilePermissions(_str=str(self) + str(other))
def __add__(self, other):
return FilePermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else ''))
FilePermissions.CREATE = FilePermissions(create=True)
FilePermissions.DELETE = FilePermissions(delete=True)
FilePermissions.READ = FilePermissions(read=True)
FilePermissions.WRITE = FilePermissions(write=True)
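A brief usage sketch (not part of the removed module) showing how the predefined FilePermissions flags compose and serialize in the read/create/write/delete order documented above:
perms = FilePermissions.READ + FilePermissions.WRITE
assert str(perms) == 'rw'
# A permission string can also be parsed back into flags:
assert FilePermissions(_str='rcwd').delete is True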
class SharePermissions(object):
'''
SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`.
:ivar SharePermissions SharePermissions.DELETE:
Delete any file in the share.
Note: You cannot grant permissions to delete a share with a service SAS. Use
an account SAS instead.
:ivar SharePermissions SharePermissions.LIST:
List files and directories in the share.
:ivar SharePermissions SharePermissions.READ:
Read the content, properties or metadata of any file in the share. Use any
file in the share as the source of a copy operation.
:ivar SharePermissions SharePermissions.WRITE:
For any file in the share, create or write content, properties or metadata.
Resize the file. Use the file as the destination of a copy operation within
the same account.
Note: You cannot grant permissions to read or write share properties or
metadata with a service SAS. Use an account SAS instead.
'''
def __init__(self, read=False, write=False, delete=False, list=False,
_str=None):
'''
:param bool read:
Read the content, properties or metadata of any file in the share. Use any
file in the share as the source of a copy operation.
:param bool write:
For any file in the share, create or write content, properties or metadata.
Resize the file. Use the file as the destination of a copy operation within
the same account.
Note: You cannot grant permissions to read or write share properties or
metadata with a service SAS. Use an account SAS instead.
:param bool delete:
Delete any file in the share.
Note: You cannot grant permissions to delete a share with a service SAS. Use
an account SAS instead.
:param bool list:
List files and directories in the share.
:param str _str:
A string representing the permissions
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
self.list = list or ('l' in _str)
def __or__(self, other):
return SharePermissions(_str=str(self) + str(other))
def __add__(self, other):
return SharePermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else ''))
SharePermissions.DELETE = SharePermissions(delete=True)
SharePermissions.LIST = SharePermissions(list=True)
SharePermissions.READ = SharePermissions(read=True)
SharePermissions.WRITE = SharePermissions(write=True)
class NTFSAttributes(object):
"""
Valid set of attributes to set for file or directory.
To set attributes on a directory, 'Directory' should always be enabled, except when setting 'None' to clear all attributes.
:ivar bool read_only:
Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
:ivar bool hidden:
Enable/disable 'Hidden' attribute for DIRECTORY or FILE
:ivar bool system:
Enable/disable 'System' attribute for DIRECTORY or FILE
:ivar bool none:
Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
:ivar bool directory:
Enable/disable 'Directory' attribute for DIRECTORY
:ivar bool archive:
Enable/disable 'Archive' attribute for DIRECTORY or FILE
:ivar bool temporary:
Enable/disable 'Temporary' attribute for FILE
:ivar bool offline:
Enable/disable 'Offline' attribute for DIRECTORY or FILE
:ivar bool not_content_indexed:
Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
:ivar bool no_scrub_data:
Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
"""
def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False, _str=None):
if not _str:
_str = ''
self.read_only = read_only or ('ReadOnly' in _str)
self.hidden = hidden or ('Hidden' in _str)
self.system = system or ('System' in _str)
self.none = none or ('None' in _str)
self.directory = directory or ('Directory' in _str)
self.archive = archive or ('Archive' in _str)
self.temporary = temporary or ('Temporary' in _str)
self.offline = offline or ('Offline' in _str)
self.not_content_indexed = not_content_indexed or ('NotContentIndexed' in _str)
self.no_scrub_data = no_scrub_data or ('NoScrubData' in _str)
def __or__(self, other):
return NTFSAttributes(_str=str(self) + str(other))
def __add__(self, other):
return NTFSAttributes(_str=str(self) + str(other))
def __str__(self):
concatenated_params = (('ReadOnly|' if self.read_only else '') +
('Hidden|' if self.hidden else '') +
('System|' if self.system else '') +
('None|' if self.none else '') +
('Directory|' if self.directory else '') +
('Archive|' if self.archive else '') +
('Temporary|' if self.temporary else '') +
('Offline|' if self.offline else '') +
('NotContentIndexed|' if self.not_content_indexed else '') +
('NoScrubData|' if self.no_scrub_data else ''))
return concatenated_params.strip('|')
NTFSAttributes.READ_ONLY = NTFSAttributes(read_only=True)
NTFSAttributes.HIDDEN = NTFSAttributes(hidden=True)
NTFSAttributes.SYSTEM = NTFSAttributes(system=True)
NTFSAttributes.NONE = NTFSAttributes(none=True)
NTFSAttributes.DIRECTORY = NTFSAttributes(directory=True)
NTFSAttributes.ARCHIVE = NTFSAttributes(archive=True)
NTFSAttributes.TEMPORARY = NTFSAttributes(temporary=True)
NTFSAttributes.OFFLINE = NTFSAttributes(offline=True)
NTFSAttributes.NOT_CONTENT_INDEXED = NTFSAttributes(not_content_indexed=True)
NTFSAttributes.NO_SCRUB_DATA = NTFSAttributes(no_scrub_data=True)
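A short sketch (not part of the removed module) showing how NTFSAttributes flags combine and serialize pipe-delimited, matching the 'Temporary|Archive' example in the docstring above:
attrs = NTFSAttributes.READ_ONLY | NTFSAttributes.ARCHIVE
assert str(attrs) == 'ReadOnly|Archive'
# The same string form can be passed as ntfs_attributes on SMBProperties.
assert NTFSAttributes(_str='ReadOnly|Archive').archive is True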

View file

@ -1,229 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.storage.common.sharedaccesssignature import (
SharedAccessSignature,
_SharedAccessHelper,
_QueryStringConstants,
_sign_string,
)
from azure.storage.common._common_conversion import (
_to_str,
)
from ._constants import X_MS_VERSION
class FileSharedAccessSignature(SharedAccessSignature):
'''
Provides a factory for creating file and share access
signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
'''
super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
def generate_file(self, share_name, directory_name=None, file_name=None,
permission=None, expiry=None, start=None, id=None,
ip=None, protocol=None, cache_control=None,
content_disposition=None, content_encoding=None,
content_language=None, content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = share_name
if directory_name is not None:
resource_path += '/' + _to_str(directory_name)
resource_path += '/' + _to_str(file_name)
sas = _FileSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(id)
sas.add_resource('f')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, resource_path)
return sas.get_token()
def generate_share(self, share_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the share.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param SharePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _FileSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(id)
sas.add_resource('s')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, share_name)
return sas.get_token()
class _FileSharedAccessHelper(_SharedAccessHelper):
def __init__(self):
super(_FileSharedAccessHelper, self).__init__()
def add_resource_signature(self, account_name, account_key, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/file/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION) +
get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
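An illustrative sketch (not part of the removed module) of generating a file SAS with the factory above; the account name, key, and paths are placeholders, and FilePermissions is the models class shown earlier in this diff:
from datetime import datetime, timedelta
sas_factory = FileSharedAccessSignature('mystorageaccount', 'placeholder-account-key')
token = sas_factory.generate_file(
    share_name='myshare',
    directory_name='docs',
    file_name='report.txt',
    permission=FilePermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1))
# 'token' is the SAS query string to pass as sas_token when constructing FileService.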

View file

@ -1,14 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .models import (
Queue,
QueueMessage,
QueuePermissions,
QueueMessageFormat,
)
from .queueservice import QueueService
from ._constants import __version__

View file

@ -1,11 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '2.1.0'
# x-ms-version for storage service.
X_MS_VERSION = '2019-02-02'

View file

@ -1,150 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from dateutil import parser
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from .models import (
Queue,
QueueMessage,
)
from azure.storage.common.models import (
_list,
)
from azure.storage.common._deserialization import (
_to_int,
_parse_metadata,
)
from ._encryption import (
_decrypt_queue_message,
)
def _parse_metadata_and_message_count(response):
'''
Extracts approximate messages count header.
'''
metadata = _parse_metadata(response)
metadata.approximate_message_count = _to_int(response.headers.get('x-ms-approximate-messages-count'))
return metadata
def _parse_queue_message_from_headers(response):
'''
Extracts pop receipt and time next visible from headers.
'''
message = QueueMessage()
message.pop_receipt = response.headers.get('x-ms-popreceipt')
message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible'))
return message
def _convert_xml_to_queues(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Queues>
<Queue>
<Name>string-value</Name>
<Metadata>
<metadata-name>value</metadata-name>
</Metadata>
</Queue>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
queues = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(queues, 'next_marker', next_marker)
queues_element = list_element.find('Queues')
for queue_element in queues_element.findall('Queue'):
# Name element
queue = Queue()
queue.name = queue_element.findtext('Name')
# Metadata
metadata_root_element = queue_element.find('Metadata')
if metadata_root_element is not None:
queue.metadata = dict()
for metadata_element in metadata_root_element:
queue.metadata[metadata_element.tag] = metadata_element.text
# Add queue to list
queues.append(queue)
return queues
def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver,
content=None):
'''
<?xml version="1.0" encoding="utf-8"?>
<QueueMessagesList>
<QueueMessage>
<MessageId>string-message-id</MessageId>
<InsertionTime>insertion-time</InsertionTime>
<ExpirationTime>expiration-time</ExpirationTime>
<PopReceipt>opaque-string-receipt-data</PopReceipt>
<TimeNextVisible>time-next-visible</TimeNextVisible>
<DequeueCount>integer</DequeueCount>
<MessageText>message-body</MessageText>
</QueueMessage>
</QueueMessagesList>
'''
if response is None or response.body is None:
return None
messages = list()
list_element = ETree.fromstring(response.body)
for message_element in list_element.findall('QueueMessage'):
message = QueueMessage()
message.id = message_element.findtext('MessageId')
dequeue_count = message_element.findtext('DequeueCount')
if dequeue_count is not None:
message.dequeue_count = _to_int(dequeue_count)
# content is not returned for put_message
if content is not None:
message.content = content
else:
message.content = message_element.findtext('MessageText')
if (key_encryption_key is not None) or (resolver is not None):
message.content = _decrypt_queue_message(message.content, require_encryption,
key_encryption_key, resolver)
message.content = decode_function(message.content)
message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
message.pop_receipt = message_element.findtext('PopReceipt')
time_next_visible = message_element.find('TimeNextVisible')
if time_next_visible is not None:
message.time_next_visible = parser.parse(time_next_visible.text)
# Add message to list
messages.append(message)
return messages

View file

@ -1,159 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from json import (
dumps,
loads,
)
from azure.common import (
AzureException,
)
from cryptography.hazmat.primitives.padding import PKCS7
from azure.storage.common._common_conversion import (
_encode_base64,
_decode_base64_to_bytes
)
from azure.storage.common._encryption import (
_generate_encryption_data_dict,
_dict_to_encryption_data,
_generate_AES_CBC_cipher,
_validate_and_unwrap_cek,
_EncryptionAlgorithm,
)
from azure.storage.common._error import (
_ERROR_DECRYPTION_FAILURE,
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
_validate_not_none,
_validate_key_encryption_key_wrap,
)
from ._error import (
_ERROR_MESSAGE_NOT_ENCRYPTED
)
def _encrypt_queue_message(message, key_encryption_key):
'''
Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
Returns a json-formatted string containing the encrypted message and the encryption metadata.
:param object message:
The plain text message to be encrypted.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
:return: A json-formatted string containing the encrypted message and the encryption metadata.
:rtype: str
'''
_validate_not_none('message', message)
_validate_not_none('key_encryption_key', key_encryption_key)
_validate_key_encryption_key_wrap(key_encryption_key)
# AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
content_encryption_key = os.urandom(32)
initialization_vector = os.urandom(16)
# Queue encoding functions all return unicode strings, and encryption should
# operate on binary strings.
message = message.encode('utf-8')
cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
# PKCS7 with 16 byte blocks ensures compatibility with AES.
padder = PKCS7(128).padder()
padded_data = padder.update(message) + padder.finalize()
# Encrypt the data.
encryptor = cipher.encryptor()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
# Build the dictionary structure.
queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
content_encryption_key,
initialization_vector)}
return dumps(queue_message)
def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
'''
Returns the decrypted message contents from an EncryptedQueueMessage.
If no encryption metadata is present, will return the unaltered message.
:param str message:
The JSON formatted QueueEncryptedMessage contents with all associated metadata.
:param bool require_encryption:
If set, will enforce that the retrieved messages are encrypted and decrypt them.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The plain text message from the queue message.
:rtype: str
'''
try:
message = loads(message)
encryption_data = _dict_to_encryption_data(message['EncryptionData'])
decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
except (KeyError, ValueError):
# Message was not json formatted and so was not encrypted
# or the user provided a json formatted message.
if require_encryption:
raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED)
return message
try:
return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
except Exception:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
'''
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
'''
_validate_not_none('message', message)
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
# decrypt data
decrypted_data = message
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
# unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
return decrypted_data
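A round-trip sketch (not part of the removed module) exercising the encrypt/decrypt helpers above with a toy, pass-through key-encryption-key that satisfies the interface described in the docstrings; never use anything like this in real code:
class LocalKEK(object):
    """Toy KEK for illustration: 'wraps' the content key by returning it unchanged."""
    def wrap_key(self, key):
        return key
    def unwrap_key(self, key, algorithm):
        return key
    def get_key_wrap_algorithm(self):
        return 'none'
    def get_kid(self):
        return 'local-kek-1'

kek = LocalKEK()
envelope = _encrypt_queue_message(u'hello queue', kek)        # JSON envelope string
assert _decrypt_queue_message(envelope, True, kek, None) == u'hello queue'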

View file

@ -1,27 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
from azure.storage.common._error import (
_validate_type_bytes,
)
_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.'
def _validate_message_type_text(param):
if sys.version_info < (3,):
if not isinstance(param, unicode):
raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)
else:
if not isinstance(param, str):
raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
def _validate_message_type_bytes(param):
_validate_type_bytes('message', param)

View file

@ -1,73 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
try:
from cStringIO import StringIO as BytesIO
except:
from StringIO import StringIO as BytesIO
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from azure.storage.common._common_conversion import (
_str,
)
from ._encryption import (
_encrypt_queue_message,
)
def _get_path(queue_name=None, include_messages=None, message_id=None):
'''
Creates the path to access a queue resource.
queue_name:
Name of queue.
include_messages:
Whether or not to include messages.
message_id:
Message id.
'''
if queue_name and include_messages and message_id:
return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
if queue_name and include_messages:
return '/{0}/messages'.format(_str(queue_name))
elif queue_name:
return '/{0}'.format(_str(queue_name))
else:
return '/'
def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
'''
<?xml version="1.0" encoding="utf-8"?>
<QueueMessage>
<MessageText></MessageText>
</QueueMessage>
'''
queue_message_element = ETree.Element('QueueMessage')
# Enabled
message_text = encode_function(message_text)
if key_encryption_key is not None:
message_text = _encrypt_queue_message(message_text, key_encryption_key)
ETree.SubElement(queue_message_element, 'MessageText').text = message_text
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
output = stream.getvalue()
finally:
stream.close()
return output
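For illustration (not part of the removed module), the request body this helper produces for an unencrypted message, using the pass-through encoder from the queue models module later in this diff:
body = _convert_queue_message_xml(u'hello', QueueMessageFormat.noencode, None)
# body is UTF-8 bytes along the lines of:
# <?xml version='1.0' encoding='utf-8'?>
# <QueueMessage><MessageText>hello</MessageText></QueueMessage>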

View file

@ -1,239 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from base64 import (
b64encode,
b64decode,
)
from xml.sax.saxutils import escape as xml_escape
from xml.sax.saxutils import unescape as xml_unescape
from ._error import (
_validate_message_type_bytes,
_validate_message_type_text,
_ERROR_MESSAGE_NOT_BASE64,
)
class Queue(object):
'''
Queue class.
:ivar str name:
The name of the queue.
:ivar metadata:
A dict containing name-value pairs associated with the queue as metadata.
This var is set to None unless the include=metadata param was included
for the list queues operation. If this parameter was specified but the
queue has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
'''
def __init__(self):
self.name = None
self.metadata = None
class QueueMessage(object):
'''
Queue message class.
:ivar str id:
A GUID value assigned to the message by the Queue service that
identifies the message in the queue. This value may be used together
with the value of pop_receipt to delete a message from the queue after
it has been retrieved with the get messages operation.
:ivar date insertion_time:
A UTC date value representing the time the message was inserted.
:ivar date expiration_time:
A UTC date value representing the time the message expires.
:ivar int dequeue_count:
Begins with a value of 1 the first time the message is dequeued. This
value is incremented each time the message is subsequently dequeued.
:ivar obj content:
The message content. Type is determined by the decode_function set on
the service. Default is str.
:ivar str pop_receipt:
A receipt str which can be used together with the message_id element to
delete a message from the queue after it has been retrieved with the get
messages operation. Only returned by get messages operations. Set to
None for peek messages.
:ivar date time_next_visible:
A UTC date value representing the time the message will next be visible.
Only returned by get messages operations. Set to None for peek messages.
'''
def __init__(self):
self.id = None
self.insertion_time = None
self.expiration_time = None
self.dequeue_count = None
self.content = None
self.pop_receipt = None
self.time_next_visible = None
class QueueMessageFormat:
'''
Encoding and decoding methods which can be used to modify how the queue service
encodes and decodes queue messages. Set these to queueservice.encode_function
and queueservice.decode_function to modify the behavior. The defaults are
text_xmlencode and text_xmldecode, respectively.
'''
@staticmethod
def text_base64encode(data):
'''
Base64 encode unicode text.
:param str data: String to encode.
:return: Base64 encoded string.
:rtype: str
'''
_validate_message_type_text(data)
return b64encode(data.encode('utf-8')).decode('utf-8')
@staticmethod
def text_base64decode(data):
'''
Base64 decode to unicode text.
:param str data: String data to decode to unicode.
:return: Base64 decoded string.
:rtype: str
'''
try:
return b64decode(data.encode('utf-8')).decode('utf-8')
except (ValueError, TypeError):
# ValueError for Python 3, TypeError for Python 2
raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
@staticmethod
def binary_base64encode(data):
'''
Base64 encode byte strings.
:param str data: Binary string to encode.
:return: Base64 encoded data.
:rtype: str
'''
_validate_message_type_bytes(data)
return b64encode(data).decode('utf-8')
@staticmethod
def binary_base64decode(data):
'''
Base64 decode to byte string.
:param str data: Data to decode to a byte string.
:return: Base64 decoded data.
:rtype: str
'''
try:
return b64decode(data.encode('utf-8'))
except (ValueError, TypeError):
# ValueError for Python 3, TypeError for Python 2
raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
@staticmethod
def text_xmlencode(data):
'''
XML encode unicode text.
:param str data: Unicode string to encode
:return: XML encoded data.
:rtype: str
'''
_validate_message_type_text(data)
return xml_escape(data)
@staticmethod
def text_xmldecode(data):
'''
XML decode to unicode text.
:param str data: Data to decode to unicode.
:return: XML decoded data.
:rtype: str
'''
return xml_unescape(data)
@staticmethod
def noencode(data):
'''
Do no encoding.
:param str data: Data.
:return: The data passed in is returned unmodified.
:rtype: str
'''
return data
@staticmethod
def nodecode(data):
'''
Do no decoding.
:param str data: Data.
:return: The data passed in is returned unmodified.
:rtype: str
'''
return data
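A small sketch (not part of the removed module) of the base64 encode/decode pair, which the class docstring above says can be assigned to queueservice.encode_function / decode_function:
encoded = QueueMessageFormat.text_base64encode(u'hello')
assert encoded == 'aGVsbG8='
assert QueueMessageFormat.text_base64decode(encoded) == u'hello'
# Sketch only - 'queue_service' is an assumed QueueService instance:
# queue_service.encode_function = QueueMessageFormat.text_base64encode
# queue_service.decode_function = QueueMessageFormat.text_base64decode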
class QueuePermissions(object):
'''
QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature`
method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`.
:ivar QueuePermissions QueuePermissions.READ:
Read metadata and properties, including message count. Peek at messages.
:ivar QueuePermissions QueuePermissions.ADD:
Add messages to the queue.
:ivar QueuePermissions QueuePermissions.UPDATE:
Update messages in the queue. Note: Use the Process permission with
Update so you can first get the message you want to update.
:ivar QueuePermissions QueuePermissions.PROCESS:
Get and delete messages from the queue.
'''
def __init__(self, read=False, add=False, update=False, process=False, _str=None):
'''
:param bool read:
Read metadata and properties, including message count. Peek at messages.
:param bool add:
Add messages to the queue.
:param bool update:
Update messages in the queue. Note: Use the Process permission with
Update so you can first get the message you want to update.
:param bool process:
Get and delete messages from the queue.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.add = add or ('a' in _str)
self.update = update or ('u' in _str)
self.process = process or ('p' in _str)
def __or__(self, other):
return QueuePermissions(_str=str(self) + str(other))
def __add__(self, other):
return QueuePermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('a' if self.add else '') +
('u' if self.update else '') +
('p' if self.process else ''))
QueuePermissions.READ = QueuePermissions(read=True)
QueuePermissions.ADD = QueuePermissions(add=True)
QueuePermissions.UPDATE = QueuePermissions(update=True)
QueuePermissions.PROCESS = QueuePermissions(process=True)

Diff not shown because of its large size.

View file

@ -1,117 +0,0 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.storage.common.sharedaccesssignature import (
SharedAccessSignature,
_SharedAccessHelper,
_QueryStringConstants,
_sign_string,
)
from ._constants import X_MS_VERSION
class QueueSharedAccessSignature(SharedAccessSignature):
'''
Provides a factory for creating queue shared access
signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
'''
super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
def generate_queue(self, queue_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
:param str queue_name:
Name of queue.
:param QueuePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, add, update, process.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
'''
sas = _QueueSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
sas.add_id(id)
sas.add_resource_signature(self.account_name, self.account_key, queue_name)
return sas.get_token()
class _QueueSharedAccessHelper(_SharedAccessHelper):
def __init__(self):
super(_QueueSharedAccessHelper, self).__init__()
def add_resource_signature(self, account_name, account_key, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/queue/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))

View file

@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
VERSION = "1.0.0b4"
VERSION = "1.0.0b5"
CLASSIFIERS = [
'Development Status :: 4 - Beta',