
Merge pull request #8 from DinoV/master

MergeDevToMaster
This commit is contained in:
Joost de Nijs 2012-06-06 16:12:16 -07:00
Parents 10aef48af2 93ae7e6595
Commit d2f9f481f0
39 changed files with 11016 additions and 144 deletions

233
README.md

@@ -45,204 +45,149 @@ the local Storage Emulator (with the exception of Service Bus features).
# Usage
## Table Storage
To ensure a table exists, call **createTableIfNotExists**:
```Javascript
var tableService = azure.createTableService();
tableService.createTableIfNotExists('tasktable', function(error){
    if(!error){
        // Table exists
    }
});
```
To ensure a table exists, call **create_table**:
```Python
from azure.storage import TableService
ts = TableService(account_name, account_key)
table = ts.create_table('tasktable')
```
A new entity can be added by calling **insertEntity**:
```Javascript
var tableService = azure.createTableService(),
    task1 = {
        PartitionKey : 'tasksSeattle',
        RowKey: '1',
        Description: 'Take out the trash',
        DueDate: new Date(2011, 12, 14, 12)
    };
tableService.insertEntity('tasktable', task1, function(error){
    if(!error){
        // Entity inserted
    }
});
```
A new entity can be added by calling **insert_entity**:
```Python
from datetime import datetime
from azure.storage import TableService
ts = TableService(account_name, account_key)
ts.insert_entity(
    'tasktable',
    {
        'PartitionKey': 'tasksSeattle',
        'RowKey': '1',
        'Description': 'Take out the trash',
        'DueDate': datetime(2011, 12, 14, 12)
    }
)
```
The method **queryEntity** can then be used to fetch the entity that was just inserted:
```Javascript
var tableService = azure.createTableService();
tableService.queryEntity('tasktable', 'tasksSeattle', '1', function(error, serverEntity){
    if(!error){
        // Entity available in serverEntity variable
    }
});
```
The method **get_entity** can then be used to fetch the entity that was just inserted:
```Python
ts = TableService(account_name, account_key)
entity = ts.get_entity('tasktable', 'tasksSeattle', '1')
```
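This commit also introduces a batch client for grouped table operations (see batchclient.py further down in this diff). As a rough sketch of how a batched insert might look, assuming TableService exposes begin_batch/commit_batch wrappers around that client (those TableService method names are an assumption, not confirmed by this diff):
```Python
from datetime import datetime
from azure.storage import TableService
ts = TableService(account_name, account_key)
ts.begin_batch()   # assumed wrapper around _BatchClient.begin_batch()
ts.insert_entity('tasktable', {'PartitionKey': 'tasksSeattle', 'RowKey': '2',
                               'Description': 'Wash the car',
                               'DueDate': datetime(2011, 12, 15)})
ts.insert_entity('tasktable', {'PartitionKey': 'tasksSeattle', 'RowKey': '3',
                               'Description': 'Mow the lawn',
                               'DueDate': datetime(2011, 12, 16)})
ts.commit_batch()  # assumed wrapper; one changeset: same table, same PartitionKey, distinct RowKeys
```
The batch client shown below enforces a single table, a single PartitionKey and unique RowKeys per changeset, which is why the sketch keeps all inserts in one partition.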
## Blob Storage
The **createContainerIfNotExists** method can be used to create a
container in which to store a blob:
```Javascript
var blobService = azure.createBlobService();
blobService.createContainerIfNotExists('taskcontainer', {publicAccessLevel : 'blob'}, function(error){
    if(!error){
        // Container exists and is public
    }
});
```
The **create_container** method can be used to create a
container in which to store a blob:
```Python
from azure.storage import BlobService
blob_service = BlobService(account_name, account_key)
container = blob_service.create_container('taskcontainer')
```
To upload a file (assuming it is called task1-upload.txt, it contains the exact text "hello world" (no quotation marks), and it is placed in the same folder as the script below), the method **createBlockBlobFromStream** can be used:
```Javascript
var blobService = azure.createBlobService();
blobService.createBlockBlobFromStream('taskcontainer', 'task1', fs.createReadStream('task1-upload.txt'), 11, function(error){
    if(!error){
        // Blob uploaded
    }
});
```
To upload the same file with the Python library, the method **put_blob** can be used:
```Python
from azure.storage import BlobService
blob_service = BlobService(account_name, account_key)
blob_service.put_blob('taskcontainer', 'task1',
                      file('task1-upload.txt').read())
```
To download the blob and write it to the file system, the **getBlobToStream** method can be used:
```Javascript
var blobService = azure.createBlobService();
blobService.getBlobToStream('taskcontainer', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverBlob){
    if(!error){
        // Blob available in serverBlob.blob variable
    }
});
```
To download the blob and write it to the file system, the **get_blob** method can be used:
```Python
from azure.storage import BlobService
blob_service = BlobService(account_name, account_key)
blob = blob_service.get_blob('taskcontainer', 'task1')
```
## Storage Queues
The **createQueueIfNotExists** method can be used to ensure a queue exists:
```Javascript
var queueService = azure.createQueueService();
queueService.createQueueIfNotExists('taskqueue', function(error){
    if(!error){
        // Queue exists
    }
});
```
The **create_queue** method can be used to ensure a queue exists:
```Python
from azure.storage import QueueService
queue_service = QueueService(account_name, account_key)
queue = queue_service.create_queue('taskqueue')
```
The **createMessage** method can then be called to insert the message into the queue:
```Javascript
var queueService = azure.createQueueService();
queueService.createMessage('taskqueue', "Hello world!", function(error){
    if(!error){
        // Message inserted
    }
});
```
The **put_message** method can then be called to insert the message into the queue:
```Python
from azure.storage import QueueService
queue_service = QueueService(account_name, account_key)
queue_service.put_message('taskqueue', 'Hello world!')
```
It is then possible to call the **getMessages** method, process the message and then call **deleteMessage** inside the callback. This two-step process ensures messages don't get lost when they are removed from the queue.
```Javascript
var queueService = azure.createQueueService(),
    queueName = 'taskqueue';
queueService.getMessages(queueName, function(error, serverMessages){
    if(!error){
        // Process the message in less than 30 seconds, the message
        // text is available in serverMessages[0].messagetext
        queueService.deleteMessage(queueName, serverMessages[0].messageid, serverMessages[0].popreceipt, function(error){
            if(!error){
                // Message deleted
            }
        });
    }
});
```
It is then possible to call the **get_messages** method, process the message and then call **delete_message** with the message's id. This two-step process ensures messages don't get lost when they are removed from the queue.
```Python
from azure.storage import QueueService
queue_service = QueueService(account_name, account_key)
messages = queue_service.get_messages('taskqueue')
queue_service.delete_message('taskqueue', messages[0].message_id)
```
## ServiceBus Queues
ServiceBus Queues are an alternative to Storage Queues that might be useful in scenarios where more advanced messaging features are needed (larger message sizes, message ordering, single-operation destructive reads, scheduled delivery) using push-style delivery (using long polling).
The **createQueueIfNotExists** method can be used to ensure a queue exists:
```Javascript
var serviceBusService = azure.createServiceBusService();
serviceBusService.createQueueIfNotExists('taskqueue', function(error){
    if(!error){
        // Queue exists
    }
});
```
The **create_queue** method can be used to ensure a queue exists:
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
queue = sbs.create_queue('taskqueue')
```
The **sendQueueMessage** method can then be called to insert the message into the queue:
```Javascript
var serviceBusService = azure.createServiceBusService();
serviceBusService.sendQueueMessage('taskqueue', 'Hello world!', function(error){
    if(!error){
        // Message sent
    }
});
```
The **send_queue_message** method can then be called to insert the message into the queue:
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
sbs.send_queue_message('taskqueue', 'Hello World!')
```
It is then possible to call the **receiveQueueMessage** method to dequeue the message.
```Javascript
var serviceBusService = azure.createServiceBusService();
serviceBusService.receiveQueueMessage('taskqueue', function(error, serverMessage){
    if(!error){
        // Process the message
    }
});
```
It is then possible to call the **read_delete_queue_message** method to dequeue the message.
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
msg = sbs.read_delete_queue_message('taskqueue')
```
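The read-delete receive above removes the message as soon as it is read. The Message class added by this commit (see the servicebus module at the end of this diff) stores the lock token and sequence number needed for a two-step receive and exposes a delete() method. A minimal sketch of that pattern, assuming receive_queue_message accepts a peek_lock flag (the flag name is an assumption):
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
msg = sbs.receive_queue_message('taskqueue', peek_lock=True)  # peek_lock is assumed
# ... process msg.body ...
msg.delete()  # Message.delete() calls delete_queue_message with the stored lock token
```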
## ServiceBus Topics
ServiceBus topics are an abstraction on top of ServiceBus Queues that make pub/sub scenarios easy to implement.
The **createTopicIfNotExists** method can be used to create a server-side topic:
```Javascript
var serviceBusService = azure.createServiceBusService();
serviceBusService.createTopicIfNotExists('taskdiscussion', function(error){
    if(!error){
        // Topic exists
    }
});
```
The **create_topic** method can be used to create a server-side topic:
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
topic = sbs.create_topic('taskdiscussion')
```
The **sendTopicMessage** method can be used to send a message to a topic:
```Javascript
var serviceBusService = azure.createServiceBusService();
serviceBusService.sendTopicMessage('taskdiscussion', 'Hello world!', function(error){
    if(!error){
        // Message sent
    }
});
```
The **send_topic_message** method can be used to send a message to a topic:
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
sbs.send_topic_message('taskdiscussion', 'Hello world!')
```
A client can then create a subscription and start consuming messages by calling the **createSubscription** method followed by the **receiveSubscriptionMessage** method. Please note that any messages sent before the subscription is created will not be received.
```Javascript
var serviceBusService = azure.createServiceBusService(),
    topic = 'taskdiscussion',
    subscription = 'client1';
serviceBusService.createSubscription(topic, subscription, function(error1){
    if(!error1){
        // Subscription created
        serviceBusService.receiveSubscriptionMessage(topic, subscription, function(error2, serverMessage){
            if(!error2){
                // Process message
            }
        });
    }
});
```
A client can then create a subscription and start consuming messages by calling the **create_subscription** method followed by the **receive_subscription_message** method. Please note that any messages sent before the subscription is created will not be received.
```Python
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace, account_key)
sbs.create_subscription('taskdiscussion', 'client1')
msg = sbs.receive_subscription_message('taskdiscussion', 'client1')
```
**For more examples please see the [Windows Azure Python Developer Center](http://www.windowsazure.com/en-us/develop/python)**

61
src/azure.pyproj Normal file

@@ -0,0 +1,61 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{25b2c65a-0553-4452-8907-8b5b17544e68}</ProjectGuid>
<ProjectHome>
</ProjectHome>
<StartupFile>
</StartupFile>
<SearchPath>..</SearchPath>
<WorkingDirectory>.</WorkingDirectory>
<OutputPath>.</OutputPath>
<Name>azure</Name>
<RootNamespace>azure</RootNamespace>
<IsWindowsApplication>False</IsWindowsApplication>
<LaunchProvider>Standard Python launcher</LaunchProvider>
<CommandLineArguments />
<InterpreterPath />
<InterpreterArguments />
<InterpreterId>2af0f10d-7135-4994-9156-5d01c9c11b7e</InterpreterId>
<InterpreterVersion>2.7</InterpreterVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<ItemGroup>
<Compile Include="azure\http\batchclient.py" />
<Compile Include="azure\http\httpclient.py" />
<Compile Include="azure\http\winhttp.py" />
<Compile Include="azure\http\__init__.py" />
<Compile Include="azure\servicebus\servicebusservice.py" />
<Compile Include="azure\storage\blobservice.py" />
<Compile Include="azure\storage\queueservice.py" />
<Compile Include="azure\storage\cloudstorageaccount.py" />
<Compile Include="azure\storage\tableservice.py" />
<Compile Include="azure\storage\sharedaccesssignature.py" />
<Compile Include="azure\__init__.py" />
<Compile Include="azure\servicebus\__init__.py" />
<Compile Include="azure\storage\storageclient.py" />
<Compile Include="azure\storage\__init__.py" />
</ItemGroup>
<ItemGroup>
<Folder Include="azure\http" />
<Folder Include="azure\tests\" />
<Folder Include="azure\servicebus\" />
<Folder Include="azure\storage" />
</ItemGroup>
<ItemGroup>
<Content Include="build.bat" />
<Content Include="install.bat" />
<Content Include="installfrompip.bat" />
<Content Include="upload.bat" />
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
</Project>

22
src/azure.sln Normal file

@@ -0,0 +1,22 @@

Microsoft Visual Studio Solution File, Format Version 11.00
# Visual Studio 2010
Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "azure", "azure.pyproj", "{25B2C65A-0553-4452-8907-8B5B17544E68}"
EndProject
Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "azuretest", "..\test\azuretest.pyproj", "{C0742A2D-4862-40E4-8A28-036EECDBC614}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{25B2C65A-0553-4452-8907-8B5B17544E68}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{25B2C65A-0553-4452-8907-8B5B17544E68}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C0742A2D-4862-40E4-8A28-036EECDBC614}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C0742A2D-4862-40E4-8A28-036EECDBC614}.Release|Any CPU.ActiveCfg = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

506
src/azure/__init__.py Normal file

@@ -0,0 +1,506 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import types
from datetime import datetime
from xml.dom import minidom
import base64
import urllib2
import ast
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
#Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
#Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
#Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = 'Table should be the same in a batch operations'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = 'Partition Key should be the same in a batch operations'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = 'Row Keys should not be the same in a batch operations'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = 'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = 'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue is not Found'
_ERROR_TOPIC_NOT_FOUND = 'Topic is not Found'
_ERROR_CONFLICT = 'Conflict'
_ERROR_NOT_FOUND = 'Not found'
_ERROR_UNKNOWN = 'Unknown error (%s)'
_ERROR_SERVICEBUS_MISSING_INFO = 'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = 'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = 'share_access_policy must be either SignedIdentifier or AccessPolicy instance'
_ERROR_VALUE_SHOULD_NOT_BE_NULL = '%s should not be None.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = 'Cannot serialize the specified value (%s) to an entity. Please use an EntityProperty (which can specify custom types), int, str, bool, or datetime'
class WindowsAzureData(object):
''' This is the base class for data classes. It is only used to check whether an object is an instance of one of our data classes. '''
pass
class WindowsAzureError(Exception):
''' WindowsAzure Exception base class. '''
def __init__(self, message):
Exception.__init__(self, message)
class WindowsAzureConflictError(WindowsAzureError):
'''Indicates that the resource could not be created because it already
exists'''
def __init__(self, message):
self.message = message
class WindowsAzureMissingResourceError(WindowsAzureError):
'''Indicates that a request for a resource (queue, table,
container, etc...) failed because the specified resource does not exist'''
def __init__(self, message):
self.message = message
class Feed:
def __init__(self, type):
self.type = type
def _get_readable_id(id_name):
"""simplified an id to be more friendly for us people"""
pos = id_name.rfind('/')
if pos != -1:
return id_name[pos+1:]
else:
return id_name
def _get_entry_properties(xmlstr, include_id):
''' get properties from entry xml '''
xmldoc = minidom.parseString(xmlstr)
properties = {}
for entry in _get_child_nodes(xmldoc, 'entry'):
for updated in _get_child_nodes(entry, 'updated'):
properties['updated'] = updated.firstChild.nodeValue
for name in _get_children_from_path(entry, 'author', 'name'):
if name.firstChild is not None:
properties['author'] = name.firstChild.nodeValue
if include_id:
for id in _get_child_nodes(entry, 'id'):
properties['name'] = _get_readable_id(id.firstChild.nodeValue)
return properties
def _get_child_nodes(node, tagName):
return [childNode for childNode in node.getElementsByTagName(tagName)
if childNode.parentNode == node]
def _get_children_from_path(node, *path):
'''descends through a hierarchy of nodes returning the list of children
at the inner most level. Only returns children who share a common parent,
not cousins.'''
cur = node
for index, child in enumerate(path):
if isinstance(child, basestring):
next = _get_child_nodes(cur, child)
else:
next = _get_child_nodesNS(cur, *child)
if index == len(path) - 1:
return next
elif not next:
break
cur = next[0]
return []
def _get_child_nodesNS(node, ns, tagName):
return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)
if childNode.parentNode == node]
def _create_entry(entry_body):
''' Adds common part of entry to a given entry body and return the whole xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {'include_apis':'IncludeAPIs',
'message_id': 'MessageId',
'content_md5':'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
def _str_or_none(value):
if value is None:
return None
return str(value)
def _int_or_none(value):
if value is None:
return None
return str(int(value))
def _convert_class_to_xml(source, xml_prefix = True):
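# Serializes a WindowsAzureData instance (or a list of them) to XML, e.g. a Queue object with
# lock_duration set becomes '<Queue><LockDuration>...</LockDuration></Queue>' (preceded by the
# xml declaration when xml_prefix is True).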
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).iteritems():
if value is not None:
if isinstance(value, list) or isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
def _find_namespaces_from_child(parent, child, namespaces):
"""Recursively searches from the parent to the child,
gathering all the applicable namespaces along the way"""
for cur_child in parent.childNodes:
if cur_child is child:
return True
if _find_namespaces_from_child(cur_child, child, namespaces):
# we are the parent node
for key in cur_child.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
namespaces[key] = cur_child.attributes[key]
break
return False
def _find_namespaces(parent, child):
res = {}
for key in parent.documentElement.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
res[key] = parent.documentElement.attributes[key]
_find_namespaces_from_child(parent, child, res)
return res
def _clone_node_with_namespaces(node_to_clone, original_doc):
clone = node_to_clone.cloneNode(True)
for key, value in _find_namespaces(original_doc, node_to_clone).iteritems():
clone.attributes[key] = value
return clone
def _convert_response_to_feeds(response, convert_func):
feeds = []
xmldoc = minidom.parseString(response.body)
for xml_entry in _get_children_from_path(xmldoc, 'feed', 'entry'):
new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
feeds.append(convert_func(new_node.toxml()))
return feeds
def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_SHOULD_NOT_BE_NULL % (param_name))
def _html_encode(html):
ch_map = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot'), ('\'', '&apos'))
for name, value in ch_map:
html = html.replace(name, value)
return html
def _fill_list_of(xmldoc, element_type):
xmlelements = _get_child_nodes(xmldoc, element_type.__name__)
return [_parse_response_body(xmlelement.toxml(), element_type) for xmlelement in xmlelements]
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type. The child name
'''
xmlelements = _get_child_nodes(xmldoc, _get_serialization_name(element_name))
if not xmlelements:
return None
return _fill_instance_element(xmlelements[0], return_type)
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
return _parse_response_body(element.toxml(), return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
xmlelements = _get_child_nodes(xmldoc, _get_serialization_name(element_name))
if not xmlelements or not xmlelements[0].childNodes:
return None
value = xmlelements[0].firstChild.nodeValue
if data_member is None:
return value
elif isinstance(data_member, datetime):
return _to_datetime(value)
elif type(data_member) is types.BooleanType:
return value.lower() != 'false'
else:
return type(data_member)(value)
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return ''
elif isinstance(request_body, WindowsAzureData):
return _convert_class_to_xml(request_body)
return request_body
def _parse_enum_results_list(response, return_type, resp_type, item_type):
"""resp_body is the XML we received
resp_type is a string, such as Containers,
return_type is the type we're constructing, such as ContainerEnumResults
item_type is the type object of the item to be created, such as Container
This function then returns a ContainerEnumResults object with the
containers member populated with the results.
"""
# parsing something like:
# <EnumerationResults ... >
# <Queues>
# <Queue>
# <Something />
# <SomethingElse />
# </Queue>
# </Queues>
# </EnumerationResults>
respbody = response.body
return_obj = return_type()
doc = minidom.parseString(respbody)
items = []
for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
# path is something like Queues, Queue
for child in _get_children_from_path(enum_results, resp_type, resp_type[:-1]):
items.append(_fill_instance_element(child, item_type))
for name, value in vars(return_obj).iteritems():
if name == resp_type.lower(): # queues, Queues - this is the list itself, which we populated above
continue
value = _fill_data_minidom(enum_results, name, value)
if value is not None:
setattr(return_obj, name, value)
setattr(return_obj, resp_type.lower(), items)
return return_obj
def _parse_simple_list(response, type, item_type, list_name):
respbody = response.body
res = type()
res_items = []
doc = minidom.parseString(respbody)
type_name = type.__name__
item_name = item_type.__name__
for item in _get_children_from_path(doc, type_name, item_name):
res_items.append(_fill_instance_element(item, item_type))
setattr(res, list_name, res_items)
return res
def _parse_response(response, return_type):
'''
parse the HTTPResponse's body and fill all the data into a class of return_type
'''
return _parse_response_body(response.body, return_type)
def _parse_response_body(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = return_type()
for node in _get_child_nodes(doc, return_type.__name__):
for name, value in vars(return_obj).iteritems():
if isinstance(value, _list_of):
setattr(return_obj, name, _fill_list_of(node, value.list_type))
elif isinstance(value, WindowsAzureData):
setattr(return_obj, name, _fill_instance_child(node, name, value.__class__))
else:
value = _fill_data_minidom(node, name, value)
if value is not None:
setattr(return_obj, name, value)
return return_obj
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists"""
def __init__(self, list_type):
self.list_type = list_type
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
uri, query = _update_request_uri_query(request)
if use_local_storage:
return '/' + DEV_ACCOUNT_NAME + uri, query
return uri, query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
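# e.g. a path of '/taskqueue/messages?numofmessages=5' becomes '/taskqueue/messages',
# with ('numofmessages', '5') appended to request.query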
if '?' in request.path:
pos = request.path.find('?')
query_string = request.path[pos+1:]
request.path = request.path[:pos]
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
pos = query.find('=')
name = query[:pos]
value = query[pos+1:]
request.query.append((name, value))
request.path = urllib2.quote(request.path, '/()$=\',')
#add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + urllib2.quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists. This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, WindowsAzureConflictError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist. This is called by delete_* APIs with fail_not_exist=False'''
if isinstance(error, WindowsAzureMissingResourceError):
return False
else:
raise error
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard http headers.'''
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'x-ms-version', 'connection',
'content-length']
return_dict = {}
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict
def _parse_response_for_dict_prefix(response, prefix):
''' Extracts name-values for names starting with prefix from response header. Filter out the standard http headers.'''
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.iteritems():
for prefix_value in prefix:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
return return_dict
else:
return None
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-values for names in filter from response header. Filter out the standard http headers.'''
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.iteritems():
if name.lower() in filter:
return_dict[name] = value
return return_dict
else:
return None
def _get_table_host(account_name, use_local_storage=False):
''' Gets the service host based on the service type and whether it is using local storage. '''
if use_local_storage:
return DEV_TABLE_HOST
else:
return account_name + TABLE_SERVICE_HOST_BASE
def _get_queue_host(account_name, use_local_storage=False):
if use_local_storage:
return DEV_QUEUE_HOST
else:
return account_name + QUEUE_SERVICE_HOST_BASE
def _get_blob_host(account_name, use_local_storage=False):
if use_local_storage:
return DEV_BLOB_HOST
else:
return account_name + BLOB_SERVICE_HOST_BASE

src/azure/http/__init__.py Normal file

@@ -0,0 +1,64 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
class HTTPError(Exception):
''' HTTP Exception when response status code >= 300 '''
def __init__(self, status, message, respheader, respbody):
'''Creates a new HTTPError with the specified status, message,
response headers and body'''
self.message = message
self.status = status
self.respheader = respheader
self.respbody = respbody
class HTTPResponse(object):
"""Represents a response from an HTTP request. An HTTPResponse has the
following attributes:
status: the status code of the response
message: the message
headers: the returned headers, as a list of (name, value) pairs
body: the body of the response
"""
def __init__(self, status, message, headers, body):
self.status = status
self.message = message
self.headers = headers
self.body = body
class HTTPRequest:
'''Represents an HTTP Request. An HTTP Request consists of the following attributes:
host: the host name to connect to
method: the method to use to connect (string such as GET, POST, PUT, etc...)
path: the uri fragment
query: query parameters specified as a list of (name, value) pairs
headers: header values specified as (name, value) pairs
body: the body of the request.
'''
def __init__(self):
self.host = ''
self.method = ''
self.path = ''
self.query = [] # list of (name, value)
self.headers = [] # list of (header name, header value)
self.body = ''

src/azure/http/batchclient.py Normal file

@@ -0,0 +1,237 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import urllib2
import azure
from azure.http.httpclient import _HTTPClient
from azure.http import HTTPError, HTTPRequest
from azure import _update_request_uri_query, WindowsAzureError, _get_children_from_path
from azure.storage import _update_storage_table_header, METADATA_NS, _sign_storage_table_request
from xml.dom import minidom
_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
class _BatchClient(_HTTPClient):
'''
This is the class that is used for batch operation for storage table service.
It only supports one changeset.
'''
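# Typical flow:
#   begin_batch() -> insert_request_to_batch(request) for each entity -> commit_batch()
# insert_request_to_batch validates that every request targets the same table and
# PartitionKey and uses a distinct RowKey; commit_batch builds a single multipart
# changeset and POSTs it to /$batch.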
def __init__(self, service_instance, account_key, account_name, x_ms_version=None, protocol='http'):
_HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, x_ms_version=x_ms_version, protocol=protocol)
self.is_batch = False
self.batch_requests = []
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
def get_request_table(self, request):
'''
Extracts table name from request.uri. The request.uri has either "/mytable(...)"
or "/mytable" format.
request: the request to insert, update or delete entity
'''
if '(' in request.path:
pos = request.path.find('(')
return request.path[1:pos]
else:
return request.path[1:]
def get_request_partition_key(self, request):
'''
Extracts PartitionKey from request.body if it is a POST request or from request.path if
it is not a POST request. Only insert operation request is a POST request and the
PartitionKey is in the request body.
request: the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = minidom.parseString(request.body)
part_key = _get_children_from_path(doc, 'entry', 'content', (METADATA_NS, 'properties'), (_DATASERVICES_NS, 'PartitionKey'))
if not part_key:
raise WindowsAzureError(azure._ERROR_CANNOT_FIND_PARTITION_KEY)
return part_key[0].firstChild.nodeValue
else:
uri = urllib2.unquote(request.path)
pos1 = uri.find('PartitionKey=\'')
pos2 = uri.find('\',', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(azure._ERROR_CANNOT_FIND_PARTITION_KEY)
return uri[pos1 + len('PartitionKey=\''):pos2]
def get_request_row_key(self, request):
'''
Extracts RowKey from request.body if it is a POST request or from request.path if
it is not a POST request. Only insert operation request is a POST request and the
Rowkey is in the request body.
request: the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = minidom.parseString(request.body)
row_key = _get_children_from_path(doc, 'entry', 'content', (METADATA_NS, 'properties'), (_DATASERVICES_NS, 'RowKey'))
if not row_key:
raise WindowsAzureError(azure._ERROR_CANNOT_FIND_ROW_KEY)
return row_key[0].firstChild.nodeValue
else:
uri = urllib2.unquote(request.path)
pos1 = uri.find('RowKey=\'')
pos2 = uri.find('\')', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(azure._ERROR_CANNOT_FIND_ROW_KEY)
row_key = uri[pos1 + len('RowKey=\''):pos2]
return row_key
def validate_request_table(self, request):
'''
Validates that all requests have the same table name. Set the table name if it is
the first request for the batch operation.
request: the request to insert, update or delete entity
'''
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise WindowsAzureError(azure._ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request)
def validate_request_partition_key(self, request):
'''
Validates that all requests have the same PartitionKey. Sets the PartitionKey if it is
the first request for the batch operation.
request: the request to insert, update or delete entity
'''
if self.batch_partition_key:
if self.get_request_partition_key(request) != self.batch_partition_key:
raise WindowsAzureError(azure._ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
else:
self.batch_partition_key = self.get_request_partition_key(request)
def validate_request_row_key(self, request):
'''
Validates that all requests have different RowKeys and adds the RowKey to the existing RowKey list.
request: the request to insert, update or delete entity
'''
if self.batch_row_keys:
if self.get_request_row_key(request) in self.batch_row_keys:
raise WindowsAzureError(azure._ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
else:
self.batch_row_keys.append(self.get_request_row_key(request))
def begin_batch(self):
'''
Starts the batch operation. Initializes the batch variables:
is_batch: batch operation flag.
batch_table: the table name of the batch operation
batch_partition_key: the PartitionKey of the batch requests.
batch_row_keys: the RowKey list of adding requests.
batch_requests: the list of the requests.
'''
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = []
def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request: the request to insert, update or delete entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request)
def commit_batch(self):
''' Resets batch flag and commits the batch requests. '''
if self.is_batch:
self.is_batch = False
self.commit_batch_requests()
def commit_batch_requests(self):
''' Commits the batch requests. '''
batch_boundary = 'batch_a2e9d677-b28b-435e-a89e-87e6a768a431'
changeset_boundary = 'changeset_8128b620-b4bb-458c-a177-0959fb14c977'
#Commits batch only if the requests list is not empty.
if self.batch_requests:
request = HTTPRequest()
request.method = 'POST'
request.host = self.batch_requests[0].host
request.path = '/$batch'
request.headers = [('Content-Type', 'multipart/mixed; boundary=' + batch_boundary),
('Accept', 'application/atom+xml,application/xml'),
('Accept-Charset', 'UTF-8')]
request.body = '--' + batch_boundary + '\n'
request.body += 'Content-Type: multipart/mixed; boundary=' + changeset_boundary + '\n\n'
content_id = 1
# Adds each request body to the POST data.
for batch_request in self.batch_requests:
request.body += '--' + changeset_boundary + '\n'
request.body += 'Content-Type: application/http\n'
request.body += 'Content-Transfer-Encoding: binary\n\n'
request.body += batch_request.method + ' http://' + batch_request.host + batch_request.path + ' HTTP/1.1\n'
request.body += 'Content-ID: ' + str(content_id) + '\n'
content_id += 1
# Add different headers for different type requests.
if not batch_request.method == 'DELETE':
request.body += 'Content-Type: application/atom+xml;type=entry\n'
request.body += 'Content-Length: ' + str(len(batch_request.body)) + '\n\n'
request.body += batch_request.body + '\n'
else:
find_if_match = False
for name, value in batch_request.headers:
#If-Match should be already included in batch_request.headers, but in case it is missing, just add it.
if name == 'If-Match':
request.body += name + ': ' + value + '\n\n'
break
else:
request.body += 'If-Match: *\n\n'
request.body += '--' + changeset_boundary + '--' + '\n'
request.body += '--' + batch_boundary + '--'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_storage_table_header(request)
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
#Submit the whole request as batch request.
response = self.perform_request(request)
resp = response.body
if response.status >= 300:
raise HTTPError(response.status, azure._ERROR_BATCH_COMMIT_FAIL, self.respheader, resp)
return resp
def cancel_batch(self):
''' Resets the batch flag. '''
self.is_batch = False

src/azure/http/httpclient.py Normal file

@@ -0,0 +1,107 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import types
import base64
import datetime
import time
import hashlib
import hmac
import urllib2
import httplib
import ast
import sys
from xml.dom import minidom
from azure.http import HTTPError, HTTPResponse
class _HTTPClient:
'''
Takes the request and sends it to cloud service and returns the response.
'''
def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, x_ms_version=None, protocol='https'):
'''
service_instance: service client instance.
cert_file: certificate file name/location. This is only used in hosted service management.
account_name: the storage account.
account_key: the storage account access key for storage services or servicebus access key for service bus service.
service_namespace: the service namespace for service bus.
issuer: the issuer for service bus service.
x_ms_version: the x_ms_version for the service.
'''
self.service_instance = service_instance
self.status = None
self.respheader = None
self.message = None
self.cert_file = cert_file
self.account_name = account_name
self.account_key = account_key
self.service_namespace = service_namespace
self.issuer = issuer
self.x_ms_version = x_ms_version
self.protocol = protocol
def get_connection(self, request):
''' Create connection for the request. '''
# If on Windows then use winhttp HTTPConnection instead of httplib HTTPConnection due to the
# bugs in httplib HTTPSConnection. We've reported the issue to the Python
# dev team and it's already fixed for 2.7.4 but we'll need to keep this workaround meanwhile.
if sys.platform.lower().startswith('win'):
import azure.http.winhttp
_connection = azure.http.winhttp._HTTPConnection(request.host, cert_file=self.cert_file, protocol=self.protocol)
elif self.protocol == 'http':
_connection = httplib.HTTPConnection(request.host)
else:
_connection = httplib.HTTPSConnection(request.host, cert_file=self.cert_file)
return _connection
def send_request_headers(self, connection, request_headers):
for name, value in request_headers:
if value:
connection.putheader(name, value)
connection.endheaders()
def send_request_body(self, connection, request_body):
if request_body:
connection.send(request_body)
elif (not isinstance(connection, httplib.HTTPSConnection) and
not isinstance(connection, httplib.HTTPConnection)):
connection.send(None)
def perform_request(self, request):
''' Sends request to cloud service server and return the response. '''
connection = self.get_connection(request)
connection.putrequest(request.method, request.path)
self.send_request_headers(connection, request.headers)
self.send_request_body(connection, request.body)
resp = connection.getresponse()
self.status = int(resp.status)
self.message = resp.reason
self.respheader = headers = resp.getheaders()
respbody = None
if resp.length is None:
respbody = resp.read()
elif resp.length > 0:
respbody = resp.read(resp.length)
response = HTTPResponse(int(resp.status), resp.reason, headers, respbody)
if self.status >= 300:
raise HTTPError(self.status, self.message, self.respheader, respbody)
return response

342
src/azure/http/winhttp.py Normal file

@@ -0,0 +1,342 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from ctypes import c_void_p, c_long, c_ulong, c_longlong, c_ulonglong, c_short, c_ushort, c_wchar_p, c_byte
from ctypes import byref, Structure, Union, POINTER, WINFUNCTYPE, HRESULT, oledll, WinDLL, cast, create_string_buffer
import ctypes
import urllib2
#------------------------------------------------------------------------------
# Constants that are used in COM operations
VT_EMPTY = 0
VT_NULL = 1
VT_I2 = 2
VT_I4 = 3
VT_BSTR = 8
VT_BOOL = 11
VT_I1 = 16
VT_UI1 = 17
VT_UI2 = 18
VT_UI4 = 19
VT_I8 = 20
VT_UI8 = 21
VT_ARRAY = 8192
HTTPREQUEST_PROXY_SETTING = c_long
HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
#------------------------------------------------------------------------------
# Com related APIs that are used.
_ole32 = oledll.ole32
_oleaut32 = WinDLL('oleaut32')
_CLSIDFromString = _ole32.CLSIDFromString
_CoInitialize = _ole32.CoInitialize
_CoCreateInstance = _ole32.CoCreateInstance
_SysAllocString = _oleaut32.SysAllocString
_SysFreeString = _oleaut32.SysFreeString
_SafeArrayDestroy = _oleaut32.SafeArrayDestroy
_CoTaskMemAlloc = _ole32.CoTaskMemAlloc
#------------------------------------------------------------------------------
class BSTR(c_wchar_p):
''' BSTR class in python. '''
def __init__(self, value):
super(BSTR, self).__init__(_SysAllocString(value))
def __del__(self):
_SysFreeString(self)
class _tagSAFEARRAY(Structure):
'''
SAFEARRAY structure in python. Does not match the definition in
MSDN exactly & it is only mapping the used fields. Field names are also
slightly different.
'''
class _tagSAFEARRAYBOUND(Structure):
_fields_ = [('c_elements', c_ulong), ('l_lbound', c_long)]
_fields_ = [('c_dims', c_ushort),
('f_features', c_ushort),
('cb_elements', c_ulong),
('c_locks', c_ulong),
('pvdata', c_void_p),
('rgsabound', _tagSAFEARRAYBOUND*1)]
def __del__(self):
_SafeArrayDestroy(self.pvdata)
pass
class VARIANT(Structure):
'''
VARIANT structure in python. Does not match the definition in
MSDN exactly & it is only mapping the used fields. Field names are also
slightly different.
'''
class _tagData(Union):
class _tagRecord(Structure):
_fields_= [('pvoid', c_void_p), ('precord', c_void_p)]
_fields_ = [('llval', c_longlong),
('ullval', c_ulonglong),
('lval', c_long),
('ulval', c_ulong),
('ival', c_short),
('boolval', c_ushort),
('bstrval', BSTR),
('parray', POINTER(_tagSAFEARRAY)),
('record', _tagRecord)]
_fields_ = [('vt', c_ushort),
('wReserved1', c_ushort),
('wReserved2', c_ushort),
('wReserved3', c_ushort),
('vdata', _tagData)]
class GUID(Structure):
''' GUID structure in python. '''
_fields_ = [("data1", c_ulong),
("data2", c_ushort),
("data3", c_ushort),
("data4", c_byte*8)]
def __init__(self, name=None):
if name is not None:
_CLSIDFromString(unicode(name), byref(self))
class _WinHttpRequest(c_void_p):
'''
Maps the Com API to Python class functions. Not all methods in IWinHttpWebRequest
are mapped - only the methods we use.
'''
_AddRef = WINFUNCTYPE(c_long)(1, 'AddRef')
_Release = WINFUNCTYPE(c_long)(2, 'Release')
_SetProxy = WINFUNCTYPE(HRESULT, HTTPREQUEST_PROXY_SETTING, VARIANT, VARIANT)(7, 'SetProxy')
_SetCredentials = WINFUNCTYPE(HRESULT, BSTR, BSTR, HTTPREQUEST_SETCREDENTIALS_FLAGS)(8, 'SetCredentials')
_Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT)(9, 'Open')
_SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR)(10, 'SetRequestHeader')
_GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p))(11, 'GetResponseHeader')
_GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(12, 'GetAllResponseHeaders')
_Send = WINFUNCTYPE(HRESULT, VARIANT)(13, 'Send')
_Status = WINFUNCTYPE(HRESULT, POINTER(c_long))(14, 'Status')
_StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(15, 'StatusText')
_ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(16, 'ResponseText')
_ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(17, 'ResponseBody')
_ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(18, 'ResponseStream')
_WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort))(21, 'WaitForResponse')
_Abort = WINFUNCTYPE(HRESULT)(22, 'Abort')
_SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long)(23, 'SetTimeouts')
_SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR)(24, 'SetClientCertificate')
def open(self, method, url):
'''
Opens the request.
method: the request VERB 'GET', 'POST', etc.
url: the url to connect
'''
flag = VARIANT()
flag.vt = VT_BOOL
flag.vdata.boolval = 0
_method = BSTR(method)
_url = BSTR(url)
_WinHttpRequest._Open(self, _method, _url, flag)
def set_request_header(self, name, value):
''' Sets the request header. '''
_name = BSTR(name)
_value = BSTR(value)
_WinHttpRequest._SetRequestHeader(self, _name, _value)
def get_all_response_headers(self):
''' Gets back all response headers. '''
bstr_headers = c_void_p()
_WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))
bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)
headers = bstr_headers.value
_SysFreeString(bstr_headers)
return headers
def send(self, request = None):
''' Sends the request body. '''
# Sends VT_EMPTY if it is GET, HEAD request.
if request is None:
var_empty = VARIANT()
var_empty.vt = VT_EMPTY
var_empty.vdata.llval = 0
_WinHttpRequest._Send(self, var_empty)
else: # Sends request body as SAFEArray.
_request = VARIANT()
_request.vt = VT_ARRAY | VT_UI1
safearray = _tagSAFEARRAY()
safearray.c_dims = 1
safearray.cb_elements = 1
safearray.c_locks = 0
safearray.f_features = 128
safearray.rgsabound[0].c_elements = len(request)
safearray.rgsabound[0].l_lbound = 0
safearray.pvdata = cast(_CoTaskMemAlloc(len(request)), c_void_p)
ctypes.memmove(safearray.pvdata, request, len(request))
_request.vdata.parray = cast(byref(safearray), POINTER(_tagSAFEARRAY))
_WinHttpRequest._Send(self, _request)
def status(self):
''' Gets status of response. '''
status = c_long()
_WinHttpRequest._Status(self, byref(status))
return int(status.value)
def status_text(self):
''' Gets status text of response. '''
bstr_status_text = c_void_p()
_WinHttpRequest._StatusText(self, byref(bstr_status_text))
bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)
status_text = bstr_status_text.value
_SysFreeString(bstr_status_text)
return status_text
def response_text(self):
''' Gets response body as text. '''
bstr_resptext = c_void_p()
_WinHttpRequest._ResponseText(self, byref(bstr_resptext))
bstr_resptext = ctypes.cast(bstr_resptext, c_wchar_p)
resptext = bstr_resptext.value
_SysFreeString(bstr_resptext)
return resptext
def response_body(self):
'''
Gets response body as a SAFEARRAY and converts the SAFEARRAY to str. If it is an xml
file, it always contains a 3-byte UTF-8 BOM before <?xml, so we remove it.
'''
var_respbody = VARIANT()
_WinHttpRequest._ResponseBody(self, byref(var_respbody))
if var_respbody.vt == VT_ARRAY | VT_UI1:
safearray = var_respbody.vdata.parray.contents
respbody = ctypes.string_at(safearray.pvdata, safearray.rgsabound[0].c_elements)
if respbody[3:].startswith('<?xml') and respbody.startswith('\xef\xbb\xbf'):
respbody = respbody[3:]
return respbody
else:
return ''
def set_client_certificate(self, certificate):
'''Sets client certificate for the request. '''
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate)
def __del__(self):
if self.value is not None:
_WinHttpRequest._Release(self)
class _Response:
''' Response class corresponding to the response returned from httplib HTTPConnection. '''
def __init__(self, _status, _status_text, _length, _headers, _respbody):
self.status = _status
self.reason = _status_text
self.length = _length
self.headers = _headers
self.respbody = _respbody
def getheaders(self):
'''Returns response headers.'''
return self.headers
def read(self, _length):
'''Returns response body. '''
return self.respbody[:_length]
class _HTTPConnection:
''' Class corresponding to httplib HTTPConnection class. '''
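# Mirrors the subset of the httplib connection interface used by azure.http.httpclient._HTTPClient:
#   conn = _HTTPConnection(host, cert_file=..., protocol='https')
#   conn.putrequest('GET', '/path'); conn.putheader(name, value); conn.endheaders()
#   conn.send(body); response = conn.getresponse()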
def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
''' initialize the IWinHttpWebRequest Com Object.'''
self.host = unicode(host)
self.cert_file = cert_file
self._httprequest = _WinHttpRequest()
self.protocol = protocol
clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')
iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')
_CoInitialize(0)
_CoCreateInstance(byref(clsid), 0, 1, byref(iid), byref(self._httprequest))
def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
protocol = unicode(self.protocol + '://')
url = protocol + self.host + unicode(uri)
self._httprequest.open(unicode(method), url)
#sets certificate for the connection if cert_file is set.
if self.cert_file is not None:
self._httprequest.set_client_certificate(BSTR(unicode(self.cert_file)))
def putheader(self, name, value):
''' Sends the headers of request. '''
self._httprequest.set_request_header(unicode(name), unicode(value))
def endheaders(self):
''' No operation. Exists only to provide the same interface of httplib HTTPConnection.'''
pass
def send(self, request_body):
''' Sends request body. '''
if not request_body:
self._httprequest.send()
else:
self._httprequest.send(request_body)
def getresponse(self):
''' Gets the response and generates the _Response object'''
status = self._httprequest.status()
status_text = self._httprequest.status_text()
resp_headers = self._httprequest.get_all_response_headers()
fixed_headers = []
for resp_header in resp_headers.split('\n'):
if (resp_header.startswith('\t') or resp_header.startswith(' ')) and fixed_headers:
# append to previous header
fixed_headers[-1] += resp_header
else:
fixed_headers.append(resp_header)
headers = []
for resp_header in fixed_headers:
if ':' in resp_header:
pos = resp_header.find(':')
headers.append((resp_header[:pos], resp_header[pos+1:].strip()))
body = self._httprequest.response_body()
length = len(body)
return _Response(status, status_text, length, headers, body)


@@ -0,0 +1,615 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import time
import urllib2
from xml.dom import minidom
import ast
import httplib
from datetime import datetime
from azure.http import HTTPError
from azure import (WindowsAzureError, WindowsAzureData,
_create_entry, _get_entry_properties, _html_encode,
_get_child_nodes, WindowsAzureMissingResourceError,
WindowsAzureConflictError, _get_serialization_name,
_get_children_from_path)
import azure
#default rule name for subscription
DEFAULT_RULE_NAME='$Default'
#-----------------------------------------------------------------------------
# Constants for Azure app environment settings.
AZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE'
AZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY'
AZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER'
# Token cache for authentication.
_tokens = {}
# namespace used for converting rules to objects
XML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
class Queue(WindowsAzureData):
''' Queue class corresponding to Queue Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''
def __init__(self):
self.lock_duration = None
self.max_size_in_megabytes = None
self.duplicate_detection = None
self.requires_duplicate_detection = None
self.requires_session = None
self.default_message_time_to_live = None
self.enable_dead_lettering_on_message_expiration = None
self.duplicate_detection_history_time_window = None
self.max_delivery_count = None
self.enable_batched_operations = None
self.size_in_bytes = None
self.message_count = None
class Topic(WindowsAzureData):
''' Topic class corresponding to Topic Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. '''
def __init__(self):
self.default_message_time_to_live = None
self.max_size_in_mega_bytes = None
self.requires_duplicate_detection = None
self.duplicate_detection_history_time_window = None
self.enable_batched_operations = None
self.size_in_bytes = None
class Subscription(WindowsAzureData):
''' Subscription class corresponding to Subscription Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''
def __init__(self):
self.lock_duration = None
self.requires_session = None
self.default_message_time_to_live = None
self.dead_lettering_on_message_expiration = None
self.dead_lettering_on_filter_evaluation_exceptions = None
self.enable_batched_operations = None
self.max_delivery_count = None
self.message_count = None
class Rule(WindowsAzureData):
''' Rule class corresponding to Rule Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''
def __init__(self):
self.filter_type = ''
self.filter_expression = ''
self.action_type = ''
self.action_expression = ''
class Message(WindowsAzureData):
''' Message class used in the send message / get message APIs. '''
def __init__(self, body=None, service_bus_service=None, location=None, custom_properties=None,
type='application/atom+xml;type=entry;charset=utf-8', broker_properties=None):
self.body = body
self.location = location
self.broker_properties = broker_properties
self.custom_properties = custom_properties
self.type = type
self.service_bus_service = service_bus_service
self._topic_name = None
self._subscription_name = None
self._queue_name = None
if not service_bus_service:
return
# If location is set, extract the queue name for a queue message, or the
# topic and subscription names for a topic message.
if location:
if '/subscriptions/' in location:
pos = location.find('/subscriptions/')
pos1 = location.rfind('/', 0, pos-1)
self._topic_name = location[pos1+1:pos]
pos += len('/subscriptions/')
pos1 = location.find('/', pos)
self._subscription_name = location[pos:pos1]
elif '/messages/' in location:
pos = location.find('/messages/')
pos1 = location.rfind('/', 0, pos-1)
self._queue_name = location[pos1+1:pos]
def delete(self):
''' Deletes the message if the queue name, or the topic and subscription names, can be determined. '''
if self._queue_name:
self.service_bus_service.delete_queue_message(self._queue_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
elif self._topic_name and self._subscription_name:
self.service_bus_service.delete_subscription_message(self._topic_name, self._subscription_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
else:
raise WindowsAzureError(azure._ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)
def unlock(self):
''' Unlocks the message if the queue name, or the topic and subscription names, can be determined. '''
if self._queue_name:
self.service_bus_service.unlock_queue_message(self._queue_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
elif self._topic_name and self._subscription_name:
self.service_bus_service.unlock_subscription_message(self._topic_name, self._subscription_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
else:
raise WindowsAzureError(azure._ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)
def add_headers(self, request):
''' Adds additional headers to the request for a message operation. '''
# Adds custom properties
if self.custom_properties:
for name, value in self.custom_properties.iteritems():
if isinstance(value, str):
request.headers.append((name, '"' + str(value) + '"'))
elif isinstance(value, datetime):
request.headers.append((name, '"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '"'))
else:
request.headers.append((name, str(value)))
# Adds content-type
request.headers.append(('Content-Type', self.type))
# Adds BrokerProperties
if self.broker_properties:
request.headers.append(('BrokerProperties', str(self.broker_properties)))
return request.headers
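# Illustrative sketch (not part of the library): sending a message carrying custom
# properties and completing it after a peek-lock receive. The namespace, key,
# issuer and queue name are placeholders.
#
#   sbs = ServiceBusService('mynamespace', 'myaccountkey', 'owner')
#   sbs.create_queue('taskqueue')
#   msg = Message('Take out the trash', custom_properties={'priority': 2})
#   sbs.send_queue_message('taskqueue', msg)
#   received = sbs.receive_queue_message('taskqueue', peek_lock=True)
#   if received.body is not None:
#       received.delete()    # completes the peek-locked message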
def _update_service_bus_header(request, account_key, issuer):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
# If it is not a GET or HEAD request, content-type must be set.
if not request.method in ['GET', 'HEAD']:
for name, value in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(('Content-Type', 'application/atom+xml;type=entry;charset=utf-8'))
# Adds authorization header for authentication.
request.headers.append(('Authorization', _sign_service_bus_request(request, account_key, issuer)))
return request.headers
def _sign_service_bus_request(request, account_key, issuer):
''' Returns the signed string with the token. '''
return 'WRAP access_token="' + _get_token(request, account_key, issuer) + '"'
def _token_is_expired(token):
''' Checks whether the token has expired (or is about to). '''
time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')
time_pos_end = token.find('&', time_pos_begin)
token_expire_time = int(token[time_pos_begin:time_pos_end])
time_now = time.mktime(time.localtime())
# Add a 30 second margin so the token is not already expired by the time it reaches the server.
return (token_expire_time - time_now) < 30
def _get_token(request, account_key, issuer):
'''
Returns token for the request.
request: the service bus service request.
account_key: service bus access key
issuer: service bus issuer
'''
wrap_scope = 'http://' + request.host + request.path
# Check the cache and return the cached token if it has not expired.
if _tokens.has_key(wrap_scope):
token = _tokens[wrap_scope]
if not _token_is_expired(token):
return token
# Get a token from the access control server.
request_body = ('wrap_name=' + urllib2.quote(issuer) + '&wrap_password=' +
urllib2.quote(account_key) + '&wrap_scope=' +
urllib2.quote('http://' + request.host + request.path))
host = request.host.replace('.servicebus.', '-sb.accesscontrol.')
if sys.platform.lower().startswith('win'):
import azure.http.winhttp
connection = azure.http.winhttp._HTTPConnection(host, protocol='https')
else:
connection = httplib.HTTPSConnection(host)
connection.putrequest('POST', '/WRAPv0.9')
connection.putheader('Content-Length', len(request_body))
connection.endheaders()
connection.send(request_body)
resp = connection.getresponse()
token = ''
if int(resp.status) >= 200 and int(resp.status) < 300:
if resp.length:
token = resp.read(resp.length)
else:
raise HTTPError(resp.status, resp.reason, resp.getheaders(), None)
else:
raise HTTPError(resp.status, resp.reason, resp.getheaders(), None)
token = urllib2.unquote(token[token.find('=')+1:token.rfind('&')])
_tokens[wrap_scope] = token
return token
def _create_message(response, service_instance):
''' Create message from response.
response: response from service bus cloud server.
service_instance: the service bus client.
'''
respbody = response.body
custom_properties = {}
broker_properties = None
message_type = None
message_location = None
# Extract message information from the response headers.
for name, value in response.headers:
if name.lower() == 'brokerproperties':
broker_properties = ast.literal_eval(value)
elif name.lower() == 'content-type':
message_type = value
elif name.lower() == 'location':
message_location = value
elif name.lower() not in ['content-type', 'brokerproperties', 'transfer-encoding', 'server', 'location', 'date']:
if '"' in value:
custom_properties[name] = value[1:-1]
else:
custom_properties[name] = value
if message_type is None:
# No Content-Type header was returned; fall back to the Message default type.
message = Message(respbody, service_instance, message_location, custom_properties, broker_properties=broker_properties)
else:
message = Message(respbody, service_instance, message_location, custom_properties, message_type, broker_properties)
return message
# Conversion functions.
def _convert_response_to_rule(response):
return _convert_xml_to_rule(response.body)
def _convert_xml_to_rule(xmlstr):
''' Converts response xml to rule object.
The format of xml for rule:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<RuleDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<Filter i:type="SqlFilterExpression">
<SqlExpression>MyProperty='XYZ'</SqlExpression>
</Filter>
<Action i:type="SqlFilterAction">
<SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>
</Action>
</RuleDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
rule = Rule()
for rule_desc in _get_children_from_path(xmldoc, 'entry', 'content', 'RuleDescription'):
for xml_filter in _get_child_nodes(rule_desc, 'Filter'):
filter_type = xml_filter.getAttributeNS(XML_SCHEMA_NAMESPACE, 'type')
setattr(rule, 'filter_type', str(filter_type))
if xml_filter.childNodes:
for expr in _get_child_nodes(xml_filter, 'SqlExpression'):
setattr(rule, 'filter_expression', expr.firstChild.nodeValue)
for xml_action in _get_child_nodes(rule_desc, 'Action'):
action_type = xml_action.getAttributeNS(XML_SCHEMA_NAMESPACE, 'type')
setattr(rule, 'action_type', str(action_type))
if xml_action.childNodes:
action_expression = xml_action.childNodes[0].firstChild
if action_expression:
setattr(rule, 'action_expression', action_expression.nodeValue)
# Extract the id, updated and name values from the feed entry and set them on the rule.
for name, value in _get_entry_properties(xmlstr, True).iteritems():
setattr(rule, name, value)
return rule
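# Illustrative note (not part of the library): for the sample entry shown in the
# docstring above, the converter would produce a Rule with
#     rule.filter_type       == 'SqlFilterExpression'
#     rule.filter_expression == "MyProperty='XYZ'"
#     rule.action_type       == 'SqlFilterAction'
#     rule.action_expression == "set MyProperty2 = 'ABC'"
# plus the id, name and updated values of the Atom entry when they are present.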
def _convert_response_to_queue(response):
return _convert_xml_to_queue(response.body)
def _parse_bool(value):
if value.lower() == 'true':
return True
return False
_QUEUE_CONVERSION = {
'MaxSizeInMegaBytes': int,
'RequiresGroupedReceives': _parse_bool,
'SupportsDuplicateDetection': _parse_bool,
'SizeinBytes': int,
'MessageCount': int,
'EnableBatchedOperations': _parse_bool,
'RequiresSession': _parse_bool,
'LockDuration': int,
}
def _convert_xml_to_queue(xmlstr):
''' Converts xml response to queue object.
The format of xml response for queue:
<QueueDescription xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">
<MaxSizeInBytes>10000</MaxSizeInBytes>
<DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive>
<LockDuration>PT2M</LockDuration>
<RequiresGroupedReceives>False</RequiresGroupedReceives>
<SupportsDuplicateDetection>False</SupportsDuplicateDetection>
...
</QueueDescription>
'''
xmldoc = minidom.parseString(xmlstr)
queue = Queue()
invalid_queue = True
# Get the node for each attribute of the Queue class; if nothing is found, the response is not valid XML for a Queue.
for queue_desc in _get_children_from_path(xmldoc, 'entry', 'content', 'QueueDescription'):
for attr_name, attr_value in vars(queue).iteritems():
xml_attrs = _get_child_nodes(queue_desc, _get_serialization_name(attr_name))
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
conversion = _QUEUE_CONVERSION.get(attr_name)
if conversion is not None:
value = conversion(value)
setattr(queue, attr_name, value)
invalid_queue = False
if invalid_queue:
raise WindowsAzureError(azure._ERROR_QUEUE_NOT_FOUND)
# Extract the id, updated and name values from the feed entry and set them on the queue.
for name, value in _get_entry_properties(xmlstr, True).iteritems():
setattr(queue, name, value)
return queue
def _convert_response_to_topic(response):
return _convert_xml_to_topic(response.body)
_TOPIC_CONVERSION = {
'MaxSizeInMegaBytes': int,
'RequiresDuplicateDetection': _parse_bool,
'DeadLetteringOnFilterEvaluationExceptions': _parse_bool
}
def _convert_xml_to_topic(xmlstr):
'''Converts xml response to topic
The xml format for topic:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<TopicDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>
<MaxSizeInMegaBytes>1024</MaxSizeInMegaBytes>
<RequiresDuplicateDetection>false</RequiresDuplicateDetection>
<DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow>
<DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>
</TopicDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
topic = Topic()
invalid_topic = True
# Get the node for each attribute of the Topic class; if nothing is found, the response is not valid XML for a Topic.
for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'TopicDescription'):
invalid_topic = True
for attr_name, attr_value in vars(topic).iteritems():
xml_attrs = _get_child_nodes(desc, _get_serialization_name(attr_name))
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
conversion = _TOPIC_CONVERSION.get(attr_name)
if conversion is not None:
value = conversion(value)
setattr(topic, attr_name, value)
invalid_topic = False
if invalid_topic:
raise WindowsAzureError(azure._ERROR_TOPIC_NOT_FOUND)
# Extract the id, updated and name values from the feed entry and set them on the topic.
for name, value in _get_entry_properties(xmlstr, True).iteritems():
setattr(topic, name, value)
return topic
def _convert_response_to_subscription(response):
return _convert_xml_to_subscription(response.body)
_SUBSCRIPTION_CONVERSION = {
'RequiresSession' : _parse_bool,
'DeadLetteringOnMessageExpiration': _parse_bool,
'DefaultMessageTimeToLive': int,
'EnableBatchedOperations': _parse_bool,
'MaxDeliveryCount': int,
'MessageCount': int,
}
def _convert_xml_to_subscription(xmlstr):
'''Converts xml response to subscription
The xml format for subscription:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<SubscriptionDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<LockDuration>PT5M</LockDuration>
<RequiresSession>false</RequiresSession>
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>
<DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>
<DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>
</SubscriptionDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
subscription = Subscription()
for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'subscriptiondescription'):
for attr_name, attr_value in vars(subscription).iteritems():
tag_name = attr_name.replace('_', '')
xml_attrs = _get_child_nodes(desc, tag_name)
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
conversion = _SUBSCRIPTION_CONVERSION.get(attr_name)
if conversion is not None:
value = conversion(value)
setattr(subscription, attr_name, value)
for name, value in _get_entry_properties(xmlstr, True).iteritems():
setattr(subscription, name, value)
return subscription
def convert_subscription_to_xml(subscription):
'''
Converts a subscription object to XML to send. The order of the subscription fields
in the XML is significant, so we cannot simply call convert_class_to_xml.
subscription: the subscription object to be converted.
'''
subscription_body = '<SubscriptionDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if subscription:
if subscription.lock_duration is not None:
subscription_body += ''.join(['<LockDuration>', subscription.lock_duration, '</LockDuration>'])
if subscription.requires_session is not None:
subscription_body += ''.join(['<RequiresSession>', subscription.requires_session, '</RequiresSession>'])
if subscription.default_message_time_to_live is not None:
subscription_body += ''.join(['<DefaultMessageTimeToLive>', subscription.default_message_time_to_live, '</DefaultMessageTimeToLive>'])
if subscription.dead_lettering_on_message_expiration is not None:
subscription_body += ''.join(['<DeadLetteringOnMessageExpiration>', subscription.dead_lettering_on_message_expiration, '</DeadLetteringOnMessageExpiration>'])
if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:
subscription_body += ''.join(['<DeadLetteringOnFilterEvaluationExceptions>', subscription.dead_lettering_on_filter_evaluation_exceptions, '</DeadLetteringOnFilterEvaluationExceptions>'])
if subscription.enable_batched_operations is not None:
subscription_body += ''.join(['<EnableBatchedOperations>', subscription.enable_batched_operations, '</EnableBatchedOperations>'])
if subscription.max_delivery_count is not None:
subscription_body += ''.join(['<MaxDeliveryCount>', subscription.max_delivery_count, '</MaxDeliveryCount>'])
if subscription.message_count is not None:
subscription_body += ''.join(['<MessageCount>', subscription.message_count, '</MessageCount>'])
subscription_body += '</SubscriptionDescription>'
return _create_entry(subscription_body)
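# Illustrative sketch (not part of the library): building the body for a new
# subscription. Because this helper concatenates the values directly, they are
# passed as strings here (ISO 8601 durations and 'true'/'false').
#
#   sub = Subscription()
#   sub.lock_duration = 'PT1M'
#   sub.requires_session = 'false'
#   sub.dead_lettering_on_message_expiration = 'true'
#   body = convert_subscription_to_xml(sub)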
def convert_rule_to_xml(rule):
'''
Converts a rule object to XML to send. The order of the rule fields
in the XML is significant, so we cannot simply call convert_class_to_xml.
rule: the rule object to be converted.
'''
rule_body = '<RuleDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if rule:
if rule.filter_type:
rule_body += ''.join(['<Filter i:type="', _html_encode(rule.filter_type), '">'])
if rule.filter_type == 'CorrelationFilter':
rule_body += ''.join(['<CorrelationId>', _html_encode(rule.filter_expression), '</CorrelationId>'])
else:
rule_body += ''.join(['<SqlExpression>', _html_encode(rule.filter_expression), '</SqlExpression>'])
rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'
rule_body += '</Filter>'
if rule.action_type:
rule_body += ''.join(['<Action i:type="', _html_encode(rule.action_type), '">'])
if rule.action_type == 'SqlFilterAction':
rule_body += ''.join(['<SqlExpression>', _html_encode(rule.action_expression), '</SqlExpression>'])
rule_body += '</Action>'
rule_body += '</RuleDescription>'
return _create_entry(rule_body)
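# Illustrative sketch (not part of the library): building the body for a SQL
# filter/action rule; the expressions are placeholders.
#
#   rule = Rule()
#   rule.filter_type = 'SqlFilter'
#   rule.filter_expression = "priority = 'high'"
#   rule.action_type = 'SqlFilterAction'
#   rule.action_expression = "SET processed = 'false'"
#   body = convert_rule_to_xml(rule)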
def convert_topic_to_xml(topic):
'''
Converts a topic object to XML to send. The order of the topic fields
in the XML is significant, so we cannot simply call convert_class_to_xml.
topic: the topic object to be converted.
'''
topic_body = '<TopicDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if topic:
if topic.default_message_time_to_live is not None:
topic_body += ''.join(['<DefaultMessageTimeToLive>', str(topic.default_message_time_to_live), '</DefaultMessageTimeToLive>'])
if topic.max_size_in_mega_bytes is not None:
topic_body += ''.join(['<MaxSizeInMegabytes>', str(topic.max_size_in_mega_bytes), '</MaxSizeInMegabytes>'])
if topic.requires_duplicate_detection is not None:
topic_body += ''.join(['<RequiresDuplicateDetection>', str(topic.requires_duplicate_detection), '</RequiresDuplicateDetection>'])
if topic.duplicate_detection_history_time_window is not None:
topic_body += ''.join(['<DuplicateDetectionHistoryTimeWindow>', str(topic.duplicate_detection_history_time_window), '</DuplicateDetectionHistoryTimeWindow>'])
if topic.enable_batched_operations is not None:
topic_body += ''.join(['<EnableBatchedOperations>', str(topic.enable_batched_operations), '</EnableBatchedOperations>'])
if topic.size_in_bytes is not None:
topic_body += ''.join(['<SizeinBytes>', str(topic.size_in_bytes), '</SizeinBytes>'])
topic_body += '</TopicDescription>'
return _create_entry(topic_body)
def convert_queue_to_xml(queue):
'''
Converts a queue object to XML to send. The order of the queue fields
in the XML is significant, so we cannot simply call convert_class_to_xml.
queue: the queue object to be converted.
'''
queue_body = '<QueueDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if queue:
if queue.lock_duration:
queue_body += ''.join(['<LockDuration>', str(queue.lock_duration), '</LockDuration>'])
if queue.max_size_in_megabytes is not None:
queue_body += ''.join(['<MaxSizeInMegabytes>', str(queue.max_size_in_megabytes), '</MaxSizeInMegabytes>'])
if queue.requires_duplicate_detection is not None:
queue_body += ''.join(['<RequiresDuplicateDetection>', str(queue.requires_duplicate_detection), '</RequiresDuplicateDetection>'])
if queue.requires_session is not None:
queue_body += ''.join(['<RequiresSession>', str(queue.requires_session), '</RequiresSession>'])
if queue.default_message_time_to_live is not None:
queue_body += ''.join(['<DefaultMessageTimeToLive>', str(queue.default_message_time_to_live), '</DefaultMessageTimeToLive>'])
if queue.enable_dead_lettering_on_message_expiration is not None:
queue_body += ''.join(['<EnableDeadLetteringOnMessageExpiration>', str(queue.enable_dead_lettering_on_message_expiration), '</EnableDeadLetteringOnMessageExpiration>'])
if queue.duplicate_detection_history_time_window is not None:
queue_body += ''.join(['<DuplicateDetectionHistoryTimeWindow>', str(queue.duplicate_detection_history_time_window), '</DuplicateDetectionHistoryTimeWindow>'])
if queue.max_delivery_count is not None:
queue_body += ''.join(['<MaxDeliveryCount>', str(queue.max_delivery_count), '</MaxDeliveryCount>'])
if queue.enable_batched_operations is not None:
queue_body += ''.join(['<EnableBatchedOperations>', str(queue.enable_batched_operations), '</EnableBatchedOperations>'])
if queue.size_in_bytes is not None:
queue_body += ''.join(['<SizeinBytes>', str(queue.size_in_bytes), '</SizeinBytes>'])
if queue.message_count is not None:
queue_body += ''.join(['<MessageCount>', str(queue.message_count), '</MessageCount>'])
queue_body += '</QueueDescription>'
return _create_entry(queue_body)
def _service_bus_error_handler(http_error):
''' Simple error handler for the service bus service. More specific cases will be added later. '''
if http_error.status == 409:
raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
elif http_error.status == 404:
raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
else:
raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
from azure.servicebus.servicebusservice import ServiceBusService


@ -0,0 +1,705 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import urllib2
from azure.http.httpclient import _HTTPClient
from azure.http import HTTPError
from azure.servicebus import (_update_service_bus_header, _create_message,
convert_topic_to_xml, _convert_response_to_topic,
convert_queue_to_xml, _convert_response_to_queue,
convert_subscription_to_xml, _convert_response_to_subscription,
convert_rule_to_xml, _convert_response_to_rule,
_convert_xml_to_queue, _convert_xml_to_topic,
_convert_xml_to_subscription, _convert_xml_to_rule,
_service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)
from azure.http import HTTPRequest
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
_dont_fail_on_exist, _dont_fail_not_exist,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
_get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class ServiceBusService:
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is immutable.
queue: queue object to create.
queue_name: the name of the queue.
fail_on_exist: specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + ''
request.body = _get_request_body(convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
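# Illustrative sketch (not part of the library): creating a queue with explicit
# settings and reading it back. The namespace, key, issuer and queue name are
# placeholders.
#
#   from azure.servicebus import Queue
#   sbs = ServiceBusService('mynamespace', 'myaccountkey', 'owner')
#   queue = Queue()
#   queue.max_size_in_megabytes = '1024'
#   queue.default_message_time_to_live = 'PT5M'
#   sbs.create_queue('taskqueue', queue, fail_on_exist=True)
#   q = sbs.get_queue('taskqueue')
#   print q.message_count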
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all associated state
including messages in the queue.
fail_not_exist: specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name: name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is immutable.
topic_name: name of the topic.
topic: the Topic object to create.
fail_on_exist: specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + ''
request.body = _get_request_body(convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all associated state
including associated subscriptions.
topic_name: name of the topic.
fail_not_exist: specify whether to throw an exception when the topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name: name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is immutable.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: name of the rule.
fail_on_exist: specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.body = _get_request_body(convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name, fail_not_exist=False):
'''
Deletes an existing rule.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: the name of the rule. DEFAULT_RULE_NAME=$Default. Use DEFAULT_RULE_NAME
to delete default rule for the subscription.
fail_not_exist: specify whether to throw an exception when the rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: name of the rule
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_rule)
def create_subscription(self, topic_name, subscription_name, subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource manifest is
immutable.
topic_name: the name of the topic
subscription_name: the name of the subscription
fail_on_exist: specify whether to throw an exception when the subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.body = _get_request_body(convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name, fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
fail_not_exist: specify whether to throw an exception when the subscription doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name: the name of the topic
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
Enqueues a message into the specified topic. The number of messages that may be
held in the topic is limited by the topic size quota (MaxTopicSizeInBytes).
If this message causes the topic to exceed its quota, a quota-exceeded error is
returned and the message is rejected.
topic_name: name of the topic.
message: the Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name, timeout='60'):
'''
This operation is used to atomically retrieve and lock a message for processing.
The message is guaranteed not to be delivered to other receivers (on the same
subscription only) during the lock duration period specified in the subscription
description. Once the lock expires, the message becomes available to other
receivers. In order to complete processing of the message, the receiver should
issue a delete command with the lock ID received from this operation. To abandon
processing of the message and unlock it for other receivers, an Unlock Message
command should be issued, or the lock duration period can be allowed to expire.
topic_name: the name of the topic
subscription_name: the name of the subscription
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _create_message(response, self)
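# Illustrative sketch (not part of the library): the publish/subscribe flow built
# on the operations above. The namespace, key, issuer and entity names are
# placeholders.
#
#   from azure.servicebus import Message
#   sbs = ServiceBusService('mynamespace', 'myaccountkey', 'owner')
#   sbs.create_topic('taskdiscussion')
#   sbs.create_subscription('taskdiscussion', 'client1')
#   sbs.send_topic_message('taskdiscussion', Message('Hello World'))
#   msg = sbs.peek_lock_subscription_message('taskdiscussion', 'client1')
#   if msg.body is not None:
#       msg.delete()    # completes the peek-locked message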
def unlock_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given subscription.
This operation deletes the lock object, causing the message to be unlocked.
A message must have first been locked by a receiver before this operation
is called.
topic_name: the name of the topic
subscription_name: the name of the subscription
sequence_number: The sequence number of the message to be unlocked as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name, timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient for an
application; that is, using this operation it is possible for messages to
be lost if processing fails.
topic_name: the name of the topic
subscription_name: the name of the subscription
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
'''
Completes processing on a locked message and deletes it from the subscription.
This operation should only be called after processing a previously locked
message is successful to maintain At-Least-Once delivery assurances.
topic_name: the name of the topic
subscription_name: the name of the subscription
sequence_number: The sequence number of the message to be deleted as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def send_queue_message(self, queue_name, message=None):
'''
Sends a message into the specified queue. The number of messages that may be
held in the queue is limited by the queue size quota (MaxSizeInMegaBytes).
If this message causes the queue to exceed its quota, a quota-exceeded error is
returned and the message is rejected.
queue_name: name of the queue
message: the Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
Atomically retrieves and locks a message from a queue for processing. The
message is guaranteed not to be delivered to other receivers during the lock
duration period specified in the queue description. Once the lock expires,
the message will be available to other
receivers. In order to complete processing of the message, the receiver
should issue a delete command with the lock ID received from this operation.
To abandon processing of the message and unlock it for other receivers,
an Unlock Message command should be issued, or the lock duration period
can expire.
queue_name: name of the queue
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
Unlocks a message for processing by other receivers on a given queue.
This operation deletes the lock object, causing the message to be unlocked.
A message must have first been locked by a receiver before this operation is
called.
queue_name: name of the queue
sequence_number: The sequence number of the message to be unlocked as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
Reads and deletes a message from a queue as an atomic operation. This operation
should be used when a best-effort guarantee is sufficient for an application;
that is, using this operation it is possible for messages to be lost if
processing fails.
queue_name: name of the queue
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
Completes processing on a locked message and deletes it from the queue. This
operation should only be called after processing a previously locked message
is successful to maintain At-Least-Once delivery assurances.
queue_name: name of the queue
sequence_number: The sequence number of the message to be deleted as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
def receive_subscription_message(self, topic_name, subscription_name, peek_lock=True, timeout=60):
if peek_lock:
return self.peek_lock_subscription_message(topic_name, subscription_name, timeout)
else:
return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
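# Illustrative sketch (not part of the library): the two receive modes exposed by
# the helpers above. With peek_lock=True the caller must delete() or unlock() the
# message; with peek_lock=False the message is removed as it is read. Names are
# placeholders.
#
#   sbs = ServiceBusService('mynamespace', 'myaccountkey', 'owner')
#   locked = sbs.receive_queue_message('taskqueue', peek_lock=True)
#   if locked.body is not None:
#       locked.unlock()    # make it available to other receivers again
#   destructive = sbs.receive_queue_message('taskqueue', peek_lock=False)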
def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01'):
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
self.issuer = issuer
# Get the service namespace, account key and issuer. If they were passed to the
# constructor, use them; otherwise read them from the environment variables.
if not service_namespace:
if os.environ.has_key(AZURE_SERVICEBUS_NAMESPACE):
self.service_namespace = os.environ[AZURE_SERVICEBUS_NAMESPACE]
if not account_key:
if os.environ.has_key(AZURE_SERVICEBUS_ACCESS_KEY):
self.account_key = os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
if not issuer:
if os.environ.has_key(AZURE_SERVICEBUS_ISSUER):
self.issuer = os.environ[AZURE_SERVICEBUS_ISSUER]
if not self.service_namespace or not self.account_key or not self.issuer:
raise WindowsAzureError('You need to provide servicebus namespace, access key and Issuer')
self.x_ms_version = x_ms_version
self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer, x_ms_version=self.x_ms_version)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any post-processing
on the response.'''
res = ServiceBusService(self.service_namespace, self.account_key,
self.issuer, self.x_ms_version)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
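# Illustrative sketch (not part of the library): layering a logging filter on top
# of the service with with_filter(). It assumes the response object returned by
# the next filter exposes a status attribute; names and credentials are
# placeholders.
#
#   import logging
#
#   def log_filter(request, next_filter):
#       logging.info('%s %s', request.method, request.path)
#       response = next_filter(request)
#       logging.info('response status %s', response.status)
#       return response
#
#   sbs = ServiceBusService('mynamespace', 'myaccountkey', 'owner')
#   logged_sbs = sbs.with_filter(log_filter)
#   logged_sbs.list_queues()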
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as e:
return _service_bus_error_handler(e)
if not resp:
return None
return resp


@ -0,0 +1,705 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import datetime
import base64
import hashlib
import hmac
import urllib2
from xml.dom import minidom
import types
from datetime import datetime
from azure import (_create_entry,
_get_entry_properties, _html_encode, WindowsAzureError,
_get_child_nodes, _get_child_nodesNS,
WindowsAzureConflictError,
WindowsAzureMissingResourceError, _list_of,
DEV_TABLE_HOST, TABLE_SERVICE_HOST_BASE, DEV_BLOB_HOST,
BLOB_SERVICE_HOST_BASE, DEV_QUEUE_HOST,
QUEUE_SERVICE_HOST_BASE, WindowsAzureData,
_get_children_from_path, xml_escape,
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY)
import azure
#x-ms-version for storage service.
X_MS_VERSION = '2011-08-18'
class EnumResultsBase:
''' base class for EnumResults. '''
def __init__(self):
self.prefix = ''
self.marker = ''
self.max_results = 0
self.next_marker = ''
class ContainerEnumResults(EnumResultsBase):
''' Blob Container list. '''
def __init__(self):
EnumResultsBase.__init__(self)
self.containers = _list_of(Container)
def __iter__(self):
return iter(self.containers)
def __len__(self):
return len(self.containers)
def __getitem__(self, index):
return self.containers[index]
class Container(WindowsAzureData):
''' Blob container class. '''
def __init__(self):
self.name = ''
self.url = ''
self.properties = Properties()
self.metadata = Metadata()
class Properties(WindowsAzureData):
''' Blob container's properties class. '''
def __init__(self):
self.last_modified = ''
self.etag = ''
class Metadata(WindowsAzureData):
''' Metadata class. '''
def __init__(self):
self.metadata_name = ''
class RetentionPolicy(WindowsAzureData):
''' RetentionPolicy in service properties. '''
def __init__(self):
self.enabled = False
self.__dict__['days'] = None
def get_days(self):
#convert days to int value
return int(self.__dict__['days'])
def set_days(self, value):
''' set default days if days is set to empty. '''
if value == '':
self.__dict__['days'] = 10
else:
self.__dict__['days'] = value
days = property(fget=get_days, fset=set_days)
class Logging(WindowsAzureData):
''' Logging class in service properties. '''
def __init__(self):
self.version = '1.0'
self.delete = False
self.read = False
self.write = False
self.retention_policy = RetentionPolicy()
class Metrics(WindowsAzureData):
''' Metrics class in service properties. '''
def __init__(self):
self.version = '1.0'
self.enabled = False
self.include_apis = None
self.retention_policy = RetentionPolicy()
class StorageServiceProperties(WindowsAzureData):
''' Storage Service Properties class. '''
def __init__(self):
self.logging = Logging()
self.metrics = Metrics()
class AccessPolicy(WindowsAzureData):
''' Access Policy class in service properties. '''
def __init__(self):
self.start = ''
self.expiry = ''
self.permission = ''
class SignedIdentifier(WindowsAzureData):
''' Signed Identifier class for service properties. '''
def __init__(self):
self.id = ''
self.access_policy = AccessPolicy()
class SignedIdentifiers(WindowsAzureData):
''' SignedIdentifier list. '''
def __init__(self):
self.signed_identifiers = _list_of(SignedIdentifier)
def __iter__(self):
return iter(self.signed_identifiers)
class BlobEnumResults(EnumResultsBase):
''' Blob list.'''
def __init__(self):
EnumResultsBase.__init__(self)
self.blobs = _list_of(Blob)
def __iter__(self):
return iter(self.blobs)
def __len__(self):
return len(self.blobs)
def __getitem__(self, index):
return self.blobs[index]
class Blob(WindowsAzureData):
''' Blob class. '''
def __init__(self):
self.name = ''
self.snapshot = ''
self.url = ''
self.properties = BlobProperties()
self.metadata = Metadata()
self.blob_prefix = BlobPrefix()
class BlobProperties(WindowsAzureData):
''' Blob Properties '''
def __init__(self):
self.last_modified = ''
self.etag = ''
self.content_length = 0
self.content_type = ''
self.content_encoding = ''
self.content_language = ''
self.content_md5 = ''
self.xms_blob_sequence_number = 0
self.blob_type = ''
self.lease_status = ''
class BlobPrefix(WindowsAzureData):
''' BlobPrefix in Blob. '''
def __init__(self):
self.name = ''
class BlobBlock(WindowsAzureData):
''' BlobBlock class '''
def __init__(self, id=None, size=None):
self.id = id
self.size = size
class BlobBlockList(WindowsAzureData):
''' BlobBlockList class '''
def __init__(self):
self.committed_blocks = []
self.uncommitted_blocks = []
class BlockList(WindowsAzureData):
''' BlockList used to submit block list. '''
def __init__(self):
self.committed = []
self.uncommitted = []
self.latest = []
class PageRange(WindowsAzureData):
''' Page Range for page blob. '''
def __init__(self):
self.start = 0
self.end = 0
class PageList:
''' Page list for page blob. '''
def __init__(self):
self.page_ranges = _list_of(PageRange)
def __iter__(self):
return iter(self.page_ranges)
class QueueEnumResults(EnumResultsBase):
''' Queue list'''
def __init__(self):
EnumResultsBase.__init__(self)
self.queues = _list_of(Queue)
def __iter__(self):
return iter(self.queues)
def __len__(self):
return len(self.queues)
def __getitem__(self, index):
return self.queues[index]
class Queue(WindowsAzureData):
''' Queue class '''
def __init__(self):
self.name = ''
self.url = ''
self.metadata = Metadata()
class QueueMessagesList(WindowsAzureData):
''' Queue message list. '''
def __init__(self):
self.queue_messages = _list_of(QueueMessage)
def __iter__(self):
return iter(self.queue_messages)
def __len__(self):
return len(self.queue_messages)
def __getitem__(self, index):
return self.queue_messages[index]
class QueueMessage(WindowsAzureData):
''' Queue message class. '''
def __init__(self):
self.message_id = ''
self.insertion_time = ''
self.expiration_time = ''
self.pop_receipt = ''
self.time_next_visible = ''
self.dequeue_count = ''
self.message_text = ''
class TableEnumResult(EnumResultsBase):
def __init__(self):
EnumResultsBase.__init__(self)
self.tables = _list_of(Table)
def __iter__(self):
return iter(self.tables)
def __len__(self):
return len(self.tables)
def __getitem__(self, index):
return self.tables[index]
class Entity(WindowsAzureData):
''' Entity class. The attributes of entity will be created dynamically. '''
pass
class EntityProperty(WindowsAzureData):
''' Entity property. contains type and value. '''
def __init__(self, type=None, value=None):
self.type = type
self.value = value
class Table(WindowsAzureData):
''' Only for IntelliSense and to tell the user the return type. '''
pass
def _update_storage_header(request):
''' add additional headers for storage request. '''
#if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
#append additional headers based on the service
request.headers.append(('x-ms-version', X_MS_VERSION))
#append x-ms-meta name, values to header
for name, value in request.headers:
if 'x-ms-meta-name-values' in name and value:
for meta_name, meta_value in value.iteritems():
request.headers.append(('x-ms-meta-' + meta_name, meta_value))
request.headers.remove((name, value))
break
return request
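# Illustrative sketch (not part of the API): how the loop above flattens an
# 'x-ms-meta-name-values' dict into individual x-ms-meta-* headers. The header
# names and values below are made up for this example.
def _example_expand_meta_headers():
    headers = [('x-ms-meta-name-values', {'category': 'test', 'owner': 'alice'})]
    expanded = []
    for name, value in headers:
        if 'x-ms-meta-name-values' in name and value:
            for meta_name, meta_value in value.iteritems():
                expanded.append(('x-ms-meta-' + meta_name, meta_value))
        else:
            expanded.append((name, value))
    # expanded now contains ('x-ms-meta-category', 'test') and ('x-ms-meta-owner', 'alice'),
    # in whatever order the dict yields them.
    return expanded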
def _update_storage_blob_header(request, account_name, account_key):
''' add additional headers for storage blob request. '''
request = _update_storage_header(request)
current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers.append(('x-ms-date', current_time))
request.headers.append(('Content-Type', 'application/octet-stream Charset=UTF-8'))
request.headers.append(('Authorization', _sign_storage_blob_request(request, account_name, account_key)))
return request.headers
def _update_storage_queue_header(request, account_name, account_key):
''' add additional headers for storage queue request. '''
return _update_storage_blob_header(request, account_name, account_key)
def _update_storage_table_header(request):
''' add additional headers for storage table request. '''
request = _update_storage_header(request)
for name, value in request.headers:
if name.lower() == 'content-type':
break
else:
request.headers.append(('Content-Type', 'application/atom+xml'))
request.headers.append(('DataServiceVersion', '2.0;NetFx'))
request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))
current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers.append(('x-ms-date', current_time))
request.headers.append(('Date', current_time))
return request.headers
def _sign_storage_blob_request(request, account_name, account_key):
'''
Returns the signed string for a blob request, which is used to set the Authorization header.
It is also used to sign queue requests.
'''
uri_path = request.path.split('?')[0]
#method to sign
string_to_sign = request.method + '\n'
#get headers to sign
headers_to_sign = ['content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since',
'if-match', 'if-none-match', 'if-unmodified-since', 'range']
for header in headers_to_sign:
for name, value in request.headers:
if value and name.lower() == header:
string_to_sign += value + '\n'
break
else:
string_to_sign += '\n'
#get x-ms header to sign
x_ms_headers = []
for name, value in request.headers:
if 'x-ms' in name:
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value:
string_to_sign += ''.join([name, ':', value, '\n'])
#get account_name and uri path to sign
string_to_sign += '/' + account_name + uri_path
#get query string to sign if it is not table service
query_to_sign = request.query
query_to_sign.sort()
current_name = ''
for name, value in query_to_sign:
if value:
if current_name != name:
string_to_sign += '\n' + name + ':' + value
else:
string_to_sign += '\n' + ',' + value
#sign the request
decode_account_key = base64.b64decode(account_key)
signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
auth_string = 'SharedKey ' + account_name + ':' + base64.b64encode(signed_hmac_sha256.digest())
return auth_string
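# Illustrative sketch (placeholder account name/key, not real credentials): the shape
# of the Shared Key signature assembled above for a minimal GET with only x-ms-date
# and x-ms-version headers set.
def _example_shared_key_signature():
    account_name = 'myaccount'                        # placeholder
    account_key = base64.b64encode('not-a-real-key')  # placeholder; real keys are base64 too
    # 'GET', one newline per empty standard header, then x-ms headers and canonical resource
    string_to_sign = ('GET' + '\n' * 12 +
                      'x-ms-date:Wed, 06 Jun 2012 00:00:00 GMT\n' +
                      'x-ms-version:' + X_MS_VERSION + '\n' +
                      '/' + account_name + '/mycontainer/myblob')
    signed = hmac.HMAC(base64.b64decode(account_key), string_to_sign, hashlib.sha256)
    return 'SharedKey ' + account_name + ':' + base64.b64encode(signed.digest())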
def _sign_storage_table_request(request, account_name, account_key):
uri_path = request.path.split('?')[0]
string_to_sign = request.method + '\n'
headers_to_sign = ['content-md5', 'content-type', 'date']
for header in headers_to_sign:
for name, value in request.headers:
if value and name.lower() == header:
string_to_sign += value + '\n'
break
else:
string_to_sign += '\n'
#get account_name and uri path to sign
string_to_sign += ''.join(['/', account_name, uri_path])
for name, value in request.query:
if name == 'comp' and uri_path == '/':
string_to_sign += '?comp=' + value
break
#sign the request
decode_account_key = base64.b64decode(account_key)
signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
auth_string = 'SharedKey ' + account_name + ':' + base64.b64encode(signed_hmac_sha256.digest())
return auth_string
def _to_python_bool(value):
if value.lower() == 'true':
return True
return False
def _to_entity_int(data):
return 'Edm.Int32', str(data)
def _to_entity_bool(value):
if value:
return 'Edm.Boolean', 'true'
return 'Edm.Boolean', 'false'
def _to_entity_datetime(value):
return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')
def _to_entity_float(value):
return 'Edm.Double', str(value)
def _to_entity_property(value):
return value.type, str(value.value)
def _to_entity_none(value):
return '', ''
def _to_entity_str(value):
return 'Edm.String', value
# Tables of conversions to and from entity types. We support specific
# datatypes, and beyond that the user can use an EntityProperty to get
# custom data type support.
def _from_entity_int(value):
return int(value)
def _from_entity_datetime(value):
return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
_ENTITY_TO_PYTHON_CONVERSIONS = {
'Edm.Int32': _from_entity_int,
'Edm.Int64': _from_entity_int,
'Edm.Double': float,
'Edm.Boolean': _to_python_bool,
'Edm.DateTime': _from_entity_datetime,
}
# Conversion from Python type to a function which returns a tuple of the
# type string and content string.
_PYTHON_TO_ENTITY_CONVERSIONS = {
int: _to_entity_int,
long: _to_entity_int,
bool: _to_entity_bool,
datetime: _to_entity_datetime,
float: _to_entity_float,
EntityProperty: _to_entity_property,
types.NoneType: _to_entity_none,
str: _to_entity_str,
unicode: _to_entity_str,
}
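# Illustrative sketch: looking up conversions for a few Python values. The sample
# values are arbitrary; EntityProperty covers types not in the table (e.g. Edm.Guid).
def _example_entity_conversions():
    samples = [42, True, 3.14, u'hello',
               EntityProperty('Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')]
    converted = []
    for value in samples:
        conv = _PYTHON_TO_ENTITY_CONVERSIONS[type(value)]
        # e.g. ('Edm.Int32', '42'), ('Edm.Boolean', 'true'), ('Edm.Double', '3.14'), ...
        converted.append(conv(value))
    return converted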
def convert_entity_to_xml(source):
''' Converts an entity object to xml to send.
The entity format is:
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
<title />
<updated>2008-09-18T23:46:19.3857256Z</updated>
<author>
<name />
</author>
<id />
<content type="application/xml">
<m:properties>
<d:Address>Mountain View</d:Address>
<d:Age m:type="Edm.Int32">23</d:Age>
<d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
<d:BinaryData m:type="Edm.Binary" m:null="true" />
<d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
<d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
<d:IsActive m:type="Edm.Boolean">true</d:IsActive>
<d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
<d:PartitionKey>mypartitionkey</d:PartitionKey>
<d:RowKey>myrowkey1</d:RowKey>
<d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
</m:properties>
</content>
</entry>
'''
#construct the entity body included in <m:properties> and </m:properties>
entity_body = '<m:properties>{properties}</m:properties>'
if isinstance(source, WindowsAzureData):
source = vars(source)
properties_str = ''
#set properties type for types we know if value has no type info.
#if value has type info, then set the type to value.type
for name, value in source.iteritems():
mtype = ''
conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
if conv is None:
raise WindowsAzureError(_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY % type(value).__name__)
mtype, value = conv(value)
#form the property node
properties_str += ''.join(['<d:', name])
if mtype:
properties_str += ''.join([' m:type="', mtype, '"'])
properties_str += ''.join(['>', xml_escape(value), '</d:', name, '>'])
#generate the entity_body
entity_body = entity_body.format(properties=properties_str)
xmlstr = _create_entry(entity_body)
return xmlstr
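# Illustrative sketch: serializing a minimal task entity. The property names and
# values are arbitrary; the output follows the ATOM entry format documented above.
def _example_entity_xml():
    task = {'PartitionKey': 'tasksSeattle', 'RowKey': '1', 'Description': 'Take out the trash'}
    # Produces an <entry> whose <m:properties> contains, in some order:
    #   <d:PartitionKey>tasksSeattle</d:PartitionKey>
    #   <d:RowKey>1</d:RowKey>
    #   <d:Description>Take out the trash</d:Description>
    return convert_entity_to_xml(task)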
def convert_table_to_xml(table_name):
'''
Create xml to send for a given table name. The xml format for a table is
the same as for an entity; the only difference is that a table has a single
property, 'TableName', so we just call convert_entity_to_xml.
table_name: the name of the table
'''
return convert_entity_to_xml({'TableName': table_name})
def convert_block_list_to_xml(block_id_list):
'''
Convert a block list to xml to send.
block_id_list: a list of str block ids that are used in put_block_list.
Every id is emitted as a <Latest> element, so the most recently uploaded version of each block is committed.
'''
if block_id_list is None:
return ''
xml = '<?xml version="1.0" encoding="utf-8"?><BlockList>'
for value in block_id_list:
xml += '<Latest>%s</Latest>' % base64.b64encode(value)
return xml+'</BlockList>'
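# Illustrative sketch: block ids are base64-encoded into <Latest> elements. The
# ids below are arbitrary strings chosen by the caller.
def _example_block_list_xml():
    xml = convert_block_list_to_xml(['block-000', 'block-001'])
    # xml == '<?xml version="1.0" encoding="utf-8"?><BlockList>'
    #        '<Latest>YmxvY2stMDAw</Latest><Latest>YmxvY2stMDAx</Latest></BlockList>'
    return xml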
def convert_response_to_block_list(response):
'''
Converts xml response to block list class.
'''
blob_block_list = BlobBlockList()
xmldoc = minidom.parseString(response.body)
for xml_block in _get_children_from_path(xmldoc, 'BlockList', 'CommittedBlocks', 'Block'):
xml_block_id = base64.b64decode(_get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)
xml_block_size = int(_get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)
blob_block_list.committed_blocks.append(BlobBlock(xml_block_id, xml_block_size))
for xml_block in _get_children_from_path(xmldoc, 'BlockList', 'UncommittedBlocks', 'Block'):
xml_block_id = base64.b64decode(_get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)
xml_block_size = int(_get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)
blob_block_list.uncommitted_blocks.append(BlobBlock(xml_block_id, xml_block_size))
return blob_block_list
def _remove_prefix(name):
colon = name.find(':')
if colon != -1:
return name[colon + 1:]
return name
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
def _convert_response_to_entity(response):
return _convert_xml_to_entity(response.body)
def _convert_xml_to_entity(xmlstr):
''' Convert xml response to entity.
The format of entity:
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
<title />
<updated>2008-09-18T23:46:19.3857256Z</updated>
<author>
<name />
</author>
<id />
<content type="application/xml">
<m:properties>
<d:Address>Mountain View</d:Address>
<d:Age m:type="Edm.Int32">23</d:Age>
<d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
<d:BinaryData m:type="Edm.Binary" m:null="true" />
<d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
<d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
<d:IsActive m:type="Edm.Boolean">true</d:IsActive>
<d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
<d:PartitionKey>mypartitionkey</d:PartitionKey>
<d:RowKey>myrowkey1</d:RowKey>
<d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
</m:properties>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
xml_properties = None
for entry in _get_child_nodes(xmldoc, 'entry'):
for content in _get_child_nodes(entry, 'content'):
xml_properties = _get_child_nodesNS(content, METADATA_NS, 'properties') # TODO: Namespace
if not xml_properties:
return None
entity = Entity()
#extract each property node and get the type from attribute and node value
for xml_property in xml_properties[0].childNodes:
if xml_property.firstChild:
name = _remove_prefix(xml_property.nodeName)
#exclude the Timestamp since it is auto added by azure when inserting
#entity. We don't want this to mix with real properties
if name in ['Timestamp']:
continue
value = xml_property.firstChild.nodeValue
isnull = xml_property.getAttributeNS(METADATA_NS, 'null')
mtype = xml_property.getAttributeNS(METADATA_NS, 'type')
#if not isnull and no type info, then it is a string and we just need the str type to hold the property.
if not isnull and not mtype:
setattr(entity, name, value)
else: #need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
property = conv(value)
else:
property = EntityProperty()
setattr(property, 'value', value)
if isnull:
property.isnull = str(isnull)
if mtype:
property.type = str(mtype)
setattr(entity, name, property)
return entity
def _convert_xml_to_table(xmlstr):
''' Converts the xml response to table class
Simply calls _convert_xml_to_entity, extracts the table name, and adds the updated and author info.
'''
table = Table()
entity = _convert_xml_to_entity(xmlstr)
setattr(table, 'name', entity.TableName)
for name, value in _get_entry_properties(xmlstr, False).iteritems():
setattr(table, name, value)
return table
def _storage_error_handler(http_error):
''' Simple error handler for storage service. Will add more specific cases '''
if http_error.status == 409:
raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
elif http_error.status == 404:
raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
else:
raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
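# Illustrative sketch: how status codes map to exceptions. _FakeHTTPError is a
# stand-in object defined only for this example, not part of the library.
def _example_error_handling():
    class _FakeHTTPError(object):
        def __init__(self, status, message):
            self.status = status
            self.message = message
    try:
        _storage_error_handler(_FakeHTTPError(404, 'Not Found'))
    except WindowsAzureMissingResourceError:
        # a 409 would raise WindowsAzureConflictError; anything else raises WindowsAzureError
        return 'missing resource'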
# make these available just from storage.
from blobservice import BlobService
from queueservice import QueueService
from tableservice import TableService
from cloudstorageaccount import CloudStorageAccount
from sharedaccesssignature import SharedAccessSignature, SharedAccessPolicy, Permission, WebResource

View file

@ -0,0 +1,748 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import urllib2
from azure.storage import *
from azure.storage.storageclient import _StorageClient
from azure.storage import (_update_storage_blob_header,
convert_block_list_to_xml, convert_response_to_block_list)
from azure.http import HTTPRequest
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
_dont_fail_on_exist, _dont_fail_not_exist,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
_get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class BlobService(_StorageClient):
'''
This is the main class managing Blob resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
'''
def list_containers(self, prefix=None, marker=None, maxresults=None, include=None):
'''
The List Containers operation returns a list of the containers under the specified account.
prefix: Optional. Filters the results to return only containers whose names begin with
the specified prefix.
marker: Optional. A string value that identifies the portion of the list to be returned
with the next list operation.
maxresults: Optional. Specifies the maximum number of containers to return.
include: Optional. Include this parameter to specify that the container's metadata be
returned as part of the response body.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(response, ContainerEnumResults, "Containers", Container)
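# Usage sketch (illustrative; account name and key are placeholders):
#   blob_service = BlobService('myaccount', 'mykey')
#   for container in blob_service.list_containers(prefix='task'):
#       print container.name, container.properties.last_modified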
def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob_public_access=None, fail_on_exist=False):
'''
Creates a new container under the specified account. If the container with the same name
already exists, the operation fails.
x_ms_meta_name_values: Optional. A dict with name_value pairs to associate with the
container as metadata. Example:{'Category':'test'}
x_ms_blob_public_access: Optional. Possible values include: container, blob.
fail_on_exist: specify whether to throw an exception when the container exists.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
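# Usage sketch (illustrative names): create a public container without raising
# when it already exists.
#   created = blob_service.create_container('taskcontainer',
#                                           x_ms_blob_public_access='blob',
#                                           fail_on_exist=False)
#   # created is True on success, False if the container already existed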
def get_container_properties(self, container_name):
'''
Returns all user-defined metadata and system properties for the specified container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def get_container_metadata(self, container_name):
'''
Returns all user-defined metadata for the specified container. The metadata will be
in returned dictionary['x-ms-meta-(name)'].
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
'''
Sets one or more user-defined name-value pairs for the specified container.
x_ms_meta_name_values: A dict containing name, value for metadata. Example: {'category':'test'}
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def get_container_acl(self, container_name):
'''
Gets the permissions for the specified container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, SignedIdentifiers)
def set_container_acl(self, container_name, signed_identifiers=None, x_ms_blob_public_access=None):
'''
Sets the permissions for the specified container.
x_ms_blob_public_access: Optional. Possible values include 'container' and 'blob'.
signed_identifiers: SignedIdentifiers instance
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.headers = [('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))]
request.body = _get_request_body(_convert_class_to_xml(signed_identifiers))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def delete_container(self, container_name, fail_not_exist=False):
'''
Marks the specified container for deletion.
fail_not_exist: specify whether to throw an exception when the container doesn't exist.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None, include=None):
'''
Returns the list of blobs under the specified container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(response, BlobEnumResults, "Blobs", Blob)
def set_blob_service_properties(self, storage_service_properties, timeout=None):
'''
Sets the properties of a storage account's Blob service, including Windows Azure
Storage Analytics. You can also use this operation to set the default request
version for all incoming requests that do not have a version specified.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def get_blob_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Blob service, including Windows Azure
Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def get_blob_properties(self, container_name, blob_name, x_ms_lease_id=None):
'''
Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'HEAD'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_md5=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_lease_id=None):
'''
Sets system properties on the blob.
x_ms_blob_cache_control: Optional. Modifies the cache control string for the blob.
x_ms_blob_content_type: Optional. Sets the blob's content type.
x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
x_ms_blob_content_language: Optional. Sets the blob's content language.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=properties'
request.headers = [
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-content-encoding', _str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language', _str_or_none(x_ms_blob_content_language)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_encoding=None, content_language=None, content_m_d5=None, cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_blob_cache_control=None, x_ms_meta_name_values=None, x_ms_lease_id=None, x_ms_blob_content_length=None, x_ms_blob_sequence_number=None):
'''
Creates a new block blob or page blob, or updates the content of an existing block blob.
container_name: the name of container to put the blob
blob_name: the name of blob
x_ms_blob_type: Required. Could be BlockBlob or PageBlob
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
blob: the content of blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('x_ms_blob_type', x_ms_blob_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-blob-type', _str_or_none(x_ms_blob_type)),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_m_d5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding', _str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language', _str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-blob-content-length', _str_or_none(x_ms_blob_content_length)),
('x-ms-blob-sequence-number', _str_or_none(x_ms_blob_sequence_number))
]
request.body = _get_request_body(blob)
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
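# Usage sketch (illustrative names): upload a small block blob in a single call.
#   blob_service.put_blob('taskcontainer', 'task1', 'hello world', x_ms_blob_type='BlockBlob')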
def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_ms_lease_id=None, x_ms_range_get_content_md5=None):
'''
Reads or downloads a blob from the system, including its metadata and properties.
container_name: the name of container to get the blob
blob_name: the name of blob
x_ms_range: Optional. Return only the bytes of the blob in the specified range.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-range-get-content-md5', _str_or_none(x_ms_range_get_content_md5))
]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return response.body
def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease_id=None):
'''
Returns all user-defined metadata for the specified blob or snapshot.
container_name: the name of container containing the blob.
blob_name: the name of blob to get metadata.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(response, prefix='x-ms-meta')
def set_blob_metadata(self, container_name, blob_name, x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Sets user-defined metadata for the specified blob as one or more name-value pairs.
container_name: the name of container containing the blob
blob_name: the name of blob
x_ms_meta_name_values: Dict containing name and value pairs.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def lease_blob(self, container_name, blob_name, x_ms_lease_action, x_ms_lease_id=None):
'''
Establishes and manages a one-minute lock on a blob for write operations.
container_name: the name of container.
blob_name: the name of blob
x_ms_lease_id: Any GUID format string
x_ms_lease_action: Required. Possible values: acquire|renew|release|break
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-lease-action', _str_or_none(x_ms_lease_action))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['x-ms-lease-id'])
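# Usage sketch (illustrative names): acquire a lease, use the returned id for a
# subsequent write, then release it.
#   lease = blob_service.lease_blob('taskcontainer', 'task1', 'acquire')
#   lease_id = lease['x-ms-lease-id']
#   blob_service.put_blob('taskcontainer', 'task1', 'updated', 'BlockBlob', x_ms_lease_id=lease_id)
#   blob_service.lease_blob('taskcontainer', 'task1', 'release', lease_id)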
def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None):
'''
Creates a read-only snapshot of a blob.
container_name: the name of container.
blob_name: the name of blob
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
if_modified_since: Optional. Datetime string.
if_unmodified_since: DateTime string.
if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
if_none_match: Optional. An ETag value
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=snapshot'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_values=None, x_ms_source_if_modified_since=None, x_ms_source_if_unmodified_since=None, x_ms_source_if_match=None, x_ms_source_if_none_match=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None, x_ms_source_lease_id=None):
'''
Copies a blob to a destination within the storage account.
container_name: the name of container.
blob_name: the name of blob
x_ms_copy_source: the blob to be copied. Should be absolute path format.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
x_ms_source_if_modified_since: Optional. A DateTime value. Specify this conditional
    header to copy the blob only if the source blob has been modified since the specified date/time.
x_ms_source_if_unmodified_since: Optional. A DateTime value. Specify this conditional
    header to copy the blob only if the source blob has not been modified since the specified date/time.
x_ms_source_if_match: Optional. An ETag value. Specify this conditional header to copy
    the source blob only if its ETag matches the value specified.
x_ms_source_if_none_match: Optional. An ETag value. Specify this conditional header to
    copy the source blob only if its ETag does not match the value specified.
if_modified_since: Optional. Datetime string.
if_unmodified_since: DateTime string.
if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
if_none_match: Optional. An ETag value
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
x-ms-meta-name-values: a dict containing name, value for metadata.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_copy_source', x_ms_copy_source)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-source-if-modified-since', _str_or_none(x_ms_source_if_modified_since)),
('x-ms-source-if-unmodified-since', _str_or_none(x_ms_source_if_unmodified_since)),
('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),
('x-ms-source-if-none-match', _str_or_none(x_ms_source_if_none_match)),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_lease_id=None):
'''
Marks the specified blob or snapshot for deletion. The blob is later deleted
during garbage collection.
To mark a specific snapshot for deletion provide the date/time of the snapshot via
the snapshot parameter.
container_name: the name of container.
blob_name: the name of blob
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def put_block(self, container_name, blob_name, block, blockid, content_m_d5=None, x_ms_lease_id=None):
'''
Creates a new block to be committed as part of a blob.
container_name: the name of the container.
blob_name: the name of the blob
content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
the integrity of the blob during transport. When this header is specified,
the storage service checks the hash that has arrived with the one that was sent.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
a blob with an active lease, specify the valid lease ID for this header.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_not_none('blockid', blockid)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=block'
request.headers = [
('Content-MD5', _str_or_none(content_m_d5)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.query = [('blockid', base64.b64encode(_str_or_none(blockid)))]
request.body = _get_request_body(block)
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def put_block_list(self, container_name, blob_name, block_list, content_m_d5=None, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Writes a blob by specifying the list of block IDs that make up the blob. In order to
be written as part of a blob, a block must have been successfully written to the server
in a prior Put Block (REST API) operation.
container_name: the name of container.
blob_name: the name of blob
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
x_ms_blob_cache_control: Optional. Sets the blob's cache control. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_type: Optional. Sets the blob's content type. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. If specified,
this property is stored with the blob and returned with a read request.
x_ms_blob_content_language: Optional. Set the blob's content language. If specified,
this property is stored with the blob and returned with a read request.
x_ms_blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash
is not validated, as the hashes for the individual blocks were validated when
each was uploaded.
content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
the integrity of the blob during transport. When this header is specified,
the storage service checks the hash that has arrived with the one that was sent.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
a blob with an active lease, specify the valid lease ID for this header.
x-ms-meta-name-values: a dict containing name, value for metadata.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [
('Content-MD5', _str_or_none(content_m_d5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding', _str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language', _str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.body = _get_request_body(convert_block_list_to_xml(block_list))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype=None, x_ms_lease_id=None):
'''
Retrieves the list of blocks that have been uploaded as part of a block blob.
container_name: the name of container.
blob_name: the name of blob
snapshot: Optional. Datetime to determine the time to retrieve the blocks.
blocklisttype: Specifies whether to return the list of committed blocks, the
list of uncommitted blocks, or both lists together. Valid values are
committed, uncommitted, or all.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [
('snapshot', _str_or_none(snapshot)),
('blocklisttype', _str_or_none(blocklisttype))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return convert_response_to_block_list(response)
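# Usage sketch (illustrative names and block ids): upload two blocks, commit them,
# then inspect the committed block list.
#   blob_service.put_block('taskcontainer', 'bigblob', 'part one ', 'block-000')
#   blob_service.put_block('taskcontainer', 'bigblob', 'part two', 'block-001')
#   blob_service.put_block_list('taskcontainer', 'bigblob', ['block-000', 'block-001'])
#   block_list = blob_service.get_block_list('taskcontainer', 'bigblob', blocklisttype='committed')
#   # block_list.committed_blocks holds BlobBlock objects with the decoded ids and sizes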
def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write, timeout=None, content_m_d5=None, x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, x_ms_if_sequence_number_lt=None, x_ms_if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None):
'''
Writes a range of pages to a page blob.
container_name: the name of container.
blob_name: the name of blob
timeout: the timeout parameter is expressed in seconds.
x_ms_range: Required. Specifies the range of bytes to be written as a page. Both the start
and end of the range must be specified. Must be in format: bytes=startByte-endByte.
Given that pages must be aligned with 512-byte boundaries, the start offset must be
a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
byte ranges are 0-511, 512-1023, etc.
x_ms_page_write: Required. You may specify one of the following options:
1. update(lower case): Writes the bytes specified by the request body into the specified
range. The Range and Content-Length headers must match to perform the update.
2. clear(lower case): Clears the specified range and releases the space used in storage
for that range. To clear a range, set the Content-Length header to zero, and the Range
header to a value that indicates the range to clear, up to maximum blob size.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
with an active lease, specify the valid lease ID for this header.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('page', page)
_validate_not_none('x_ms_range', x_ms_range)
_validate_not_none('x_ms_page_write', x_ms_page_write)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=page'
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
('Content-MD5', _str_or_none(content_m_d5)),
('x-ms-page-write', _str_or_none(x_ms_page_write)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-if-sequence-number-lte', _str_or_none(x_ms_if_sequence_number_lte)),
('x-ms-if-sequence-number-lt', _str_or_none(x_ms_if_sequence_number_lt)),
('x-ms-if-sequence-number-eq', _str_or_none(x_ms_if_sequence_number_eq)),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match))
]
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(page)
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
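# Usage sketch (illustrative names): create an empty 1024-byte page blob, then
# write the first 512-byte page.
#   blob_service.put_blob('taskcontainer', 'pages', '', 'PageBlob', x_ms_blob_content_length='1024')
#   blob_service.put_page('taskcontainer', 'pages', 'x' * 512, 'bytes=0-511', 'update')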
def get_page_ranges(self, container_name, blob_name, snapshot=None, range=None, x_ms_range=None, x_ms_lease_id=None):
'''
Retrieves the page ranges for a blob.
container_name: the name of container.
blob_name: the name of blob
x_ms_range: Optional. Specifies the range of bytes over which to list ranges, inclusively. Both the start
and end of the range must be specified. Must be in format: bytes=startByte-endByte.
Given that pages must be aligned with 512-byte boundaries, the start offset must be
a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
byte ranges are 0-511, 512-1023, etc.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
with an active lease, specify the valid lease ID for this header.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_blob_host(self.account_name, self.use_local_storage)
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=pagelist'
request.headers = [
('Range', _str_or_none(range)),
('x-ms-range', _str_or_none(x_ms_range)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_simple_list(response, PageList, PageRange, "page_ranges")

View file

@ -0,0 +1,35 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.storage.blobservice import BlobService
from azure.storage.tableservice import TableService
from azure.storage.queueservice import QueueService
class CloudStorageAccount:
"""Provides a factory for creating the blob, queue, and table services
with a common account name and account key. Users can either use the
factory or can construct the appropriate service directly."""
def __init__(self, account_name=None, account_key=None):
self.account_name = account_name
self.account_key = account_key
def create_blob_service(self):
return BlobService(self.account_name, self.account_key)
def create_table_service(self):
return TableService(self.account_name, self.account_key)
def create_queue_service(self):
return QueueService(self.account_name, self.account_key)
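# Usage sketch (placeholder credentials): one account object hands out all three
# services with the same name and key.
#   account = CloudStorageAccount('myaccount', 'mykey')
#   blob_service = account.create_blob_service()
#   table_service = account.create_table_service()
#   queue_service = account.create_queue_service()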

View file

@ -0,0 +1,346 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import urllib2
from azure.storage import *
from azure.storage.storageclient import _StorageClient
from azure.storage import (_update_storage_queue_header)
from azure.http import HTTPRequest
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
_dont_fail_on_exist, _dont_fail_not_exist,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
_get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class QueueService(_StorageClient):
'''
This is the main class managing queue resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
'''
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including Windows Azure
Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
'''
Lists all of the queues in a given storage account.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(response, QueueEnumResults, "Queues", Queue)
def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=False):
'''
Creates a queue under the given account.
queue_name: name of the queue.
x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
with the queue as metadata.
fail_on_exist: specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Permanently deletes the specified queue.
queue_name: name of the queue.
fail_not_exist: specify whether to throw an exception when the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_queue_metadata(self, queue_name):
'''
Retrieves user-defined metadata and queue properties on the specified queue.
Metadata is associated with the queue as name-value pairs.
queue_name: name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(response, prefix='x-ms-meta')
def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
'''
Sets user-defined metadata on the specified queue. Metadata is associated
with the queue as name-value pairs.
queue_name: name of the queue.
x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
with the queue as metadata.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def put_message(self, queue_name, message_text, visibilitytimeout=None, messagettl=None):
'''
Adds a new message to the back of the message queue. A visibility timeout can
also be specified to make the message invisible until the visibility timeout
expires. A message must be in a format that can be included in an XML request
with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
2011-08-18 and newer, or 8KB in size for previous versions.
queue_name: name of the queue.
visibilitytimeout: Optional. If specified, the request must be made using an
x-ms-version of 2011-08-18 or newer.
messagettl: Optional. Specifies the time-to-live interval for the message,
in seconds. The maximum time-to-live allowed is 7 days. If this parameter
is omitted, the default time-to-live is 7 days.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_text', message_text)
request = HTTPRequest()
request.method = 'POST'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('visibilitytimeout', _str_or_none(visibilitytimeout)),
('messagettl', _str_or_none(messagettl))
]
request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
'''
Retrieves one or more messages from the front of the queue.
queue_name: name of the queue.
numofmessages: Optional. A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If fewer are
visible, the visible messages are returned. By default, a single message
is retrieved from the queue with this operation.
visibilitytimeout: Required. Specifies the new visibility timeout value, in
seconds, relative to server time. The new value must be larger than or
equal to 1 second, and cannot be larger than 7 days, or larger than 2
hours on REST protocol versions prior to version 2011-08-18. The visibility
timeout of a message can be set to a value later than the expiry time.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('numofmessages', _str_or_none(numofmessages)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def peek_messages(self, queue_name, numofmessages=None):
'''
Retrieves one or more messages from the front of the queue, but does not alter
the visibility of the message.
queue_name: name of the queue.
numofmessages: Optional. A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages?peekonly=true'
request.query = [('numofmessages', _str_or_none(numofmessages))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def delete_message(self, queue_name, message_id, popreceipt):
'''
Deletes the specified message.
queue_name: name of the queue.
popreceipt: Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('popreceipt', popreceipt)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [('popreceipt', _str_or_none(popreceipt))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def clear_messages(self, queue_name):
'''
Deletes all messages from the specified queue.
queue_name: name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def update_message(self, queue_name, message_id, message_text, popreceipt, visibilitytimeout):
'''
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
queue_name: name of the queue.
popreceipt: Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
visibilitytimeout: Required. Specifies the new visibility timeout value, in
seconds, relative to server time. The new value must be larger than or
equal to 0, and cannot be larger than 7 days. The visibility timeout
of a message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('message_text', message_text)
_validate_not_none('popreceipt', popreceipt)
_validate_not_none('visibilitytimeout', visibilitytimeout)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [
('popreceipt', _str_or_none(popreceipt)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])
def set_queue_service_properties(self, storage_service_properties, timeout=None):
'''
Sets the properties of a storage account's Queue service, including Windows Azure
Storage Analytics.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_queue_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
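A usage sketch for the QueueService class above, covering create_queue, put_message, get_messages and delete_message. The import path follows the README's convention, and the attribute names on the returned messages (message_text, message_id, pop_receipt) come from the SDK's model classes, which are not part of this diff, so treat those names as assumptions.

```Python
from azure.storage import QueueService

queue_service = QueueService(account_name, account_key)
queue_service.create_queue('taskqueue')
queue_service.put_message('taskqueue', 'Hello world')

# Retrieve a single message and delete it once processed.
messages = queue_service.get_messages('taskqueue', numofmessages=1)
for message in messages:
    print(message.message_text)
    queue_service.delete_message('taskqueue', message.message_id, message.pop_receipt)
```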


@ -0,0 +1,190 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import hmac
import hashlib
#-------------------------------------------------------------------------
# Constants for the shared access signature
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_PERMISSION = 'sp'
SIGNED_IDENTIFIER = 'si'
SIGNED_SIGNATURE = 'sig'
RESOURCE_BLOB = 'blob'
RESOURCE_CONTAINER = 'container'
SIGNED_RESOURCE_TYPE = 'resource'
SHARED_ACCESS_PERMISSION = 'permission'
#--------------------------------------------------------------------------
class WebResource:
'''
Class that represents the resource for which a shared access signature is generated.
path: the resource path.
properties: dict of name/value pairs; contains two items: resource type and
permission.
request_url: the url of the web resource, including all query parameters.
'''
def __init__(self, path=None, request_url=None, properties={}):
self.path = path
self.properties = properties
self.request_url = request_url
class Permission:
'''
Permission class. Contains the path and query_string for the path.
path: the resource path
query_string: dict of name/value pairs. Contains the SIGNED_START, SIGNED_EXPIRY,
SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER and
SIGNED_SIGNATURE values.
'''
def __init__(self, path=None, query_string=None):
self.path = path
self.query_string = query_string
class SharedAccessPolicy:
''' SharedAccessPolicy class. '''
def __init__(self, access_policy, signed_identifier=None):
self.id = signed_identifier
self.access_policy = access_policy
class SharedAccessSignature:
'''
The main class used to do the signing and generate the signature.
account_name: the storage account name used to generate the shared access signature
account_key: the access key used to generate the shared access signature
permission_set: the permission cache used to sign the request url.
'''
def __init__(self, account_name, account_key, permission_set=None):
self.account_name = account_name
self.account_key = account_key
self.permission_set = permission_set
def generate_signed_query_string(self, path, resource_type, shared_access_policy):
'''
Generates the query string for path, resource type and shared access policy.
path: the resource path
resource_type: 'blob' or 'container'
shared_access_policy: shared access policy
'''
query_string = {}
if shared_access_policy.access_policy.start:
query_string[SIGNED_START] = shared_access_policy.access_policy.start
query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry
query_string[SIGNED_RESOURCE] = resource_type
query_string[SIGNED_PERMISSION] = shared_access_policy.access_policy.permission
if shared_access_policy.id:
query_string[SIGNED_IDENTIFIER] = shared_access_policy.id
query_string[SIGNED_SIGNATURE] = self._generate_signature(path, resource_type, shared_access_policy)
return query_string
def sign_request(self, web_resource):
''' Signs the request, appending the shared access signature query string to the request_url of web_resource. '''
if self.permission_set:
for shared_access_signature in self.permission_set:
if self._permission_matches_request(shared_access_signature, web_resource,
web_resource.properties[SIGNED_RESOURCE_TYPE],
web_resource.properties[SHARED_ACCESS_PERMISSION]):
if web_resource.request_url.find('?') == -1:
web_resource.request_url += '?'
else:
web_resource.request_url += '&'
web_resource.request_url += self._convert_query_string(shared_access_signature.query_string)
break
return web_resource
def _convert_query_string(self, query_string):
''' Converts the query string dict to a str. The order of the name/value pairs is significant and must not change. '''
convert_str = ''
if query_string.has_key(SIGNED_START):
convert_str += SIGNED_START + '=' + query_string[SIGNED_START] + '&'
convert_str += SIGNED_EXPIRY + '=' + query_string[SIGNED_EXPIRY] + '&'
convert_str += SIGNED_PERMISSION + '=' + query_string[SIGNED_PERMISSION] + '&'
convert_str += SIGNED_RESOURCE + '=' + query_string[SIGNED_RESOURCE] + '&'
if query_string.has_key(SIGNED_IDENTIFIER):
convert_str += SIGNED_IDENTIFIER + '=' + query_string[SIGNED_IDENTIFIER] + '&'
convert_str += SIGNED_SIGNATURE + '=' + query_string[SIGNED_SIGNATURE] + '&'
return convert_str
def _generate_signature(self, path, resource_type, shared_access_policy):
''' Generates signature for a given path, resource_type and shared access policy. '''
def get_value_to_append(value, no_new_line=False):
return_value = ''
if value:
return_value = value
if not no_new_line:
return_value += '\n'
return return_value
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + self.account_name + path
#form the string to sign from shared_access_policy and the canonicalized resource.
#The order of values is important.
string_to_sign = (get_value_to_append(shared_access_policy.access_policy.permission) +
get_value_to_append(shared_access_policy.access_policy.start) +
get_value_to_append(shared_access_policy.access_policy.expiry) +
get_value_to_append(canonicalized_resource) +
get_value_to_append(shared_access_policy.id, True))
return self._sign(string_to_sign)
def _permission_matches_request(self, shared_access_signature, web_resource, resource_type, required_permission):
''' Check whether requested permission matches given shared_access_signature, web_resource and resource type. '''
required_resource_type = resource_type
if required_resource_type == RESOURCE_BLOB:
required_resource_type += RESOURCE_CONTAINER
for name, value in shared_access_signature.query_string.iteritems():
if name == SIGNED_RESOURCE and required_resource_type.find(value) == -1:
return False
elif name == SIGNED_PERMISSION and required_permission.find(value) == -1:
return False
return web_resource.path.find(shared_access_signature.path) != -1
def _sign(self, string_to_sign):
''' Signs the string with HMAC-SHA256 and returns the signature as a base64-encoded string. '''
decode_account_key = base64.b64decode(self.account_key)
signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
return base64.b64encode(signed_hmac_sha256.digest())
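A sketch of generating a shared access signature query string with the classes above. AccessPolicy (an object with start, expiry and permission attributes, as read by _generate_signature) is assumed to come from the SDK's storage models, and the module path is likewise an assumption; the policy values are placeholders.

```Python
from azure.storage import AccessPolicy  # assumed model class with start/expiry/permission
from azure.storage.sharedaccesssignature import (
    SharedAccessSignature, SharedAccessPolicy, RESOURCE_CONTAINER)

policy = SharedAccessPolicy(AccessPolicy(start='2012-06-01T00:00:00Z',
                                         expiry='2012-06-02T00:00:00Z',
                                         permission='r'))
sas = SharedAccessSignature(account_name, account_key)
query = sas.generate_signed_query_string('taskcontainer', RESOURCE_CONTAINER, policy)
# query is a dict keyed by the st/se/sr/sp/sig constants defined at the top of the file.
```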


@ -0,0 +1,116 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import urllib2
import hmac
import hashlib
import os
from azure.storage import _storage_error_handler, X_MS_VERSION
from azure.http.httpclient import _HTTPClient
from azure.http import HTTPError
from azure import (_parse_response, WindowsAzureError,
DEV_ACCOUNT_NAME, DEV_ACCOUNT_KEY)
import azure
#--------------------------------------------------------------------------
# constants for azure app setting environment variables
AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'
AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'
EMULATED = 'EMULATED'
#--------------------------------------------------------------------------
class _StorageClient(object):
'''
This is the base class for BlobService, TableService and QueueService.
'''
def __init__(self, account_name=None, account_key=None, protocol='http'):
self.account_name = account_name
self.account_key = account_key
self.requestid = None
self.protocol = protocol
#use_local_storage is True only when the app runs in the emulator and
#falls back to the default development storage account and key.
self.use_local_storage = False
#check whether it is run in emulator.
if os.environ.has_key(EMULATED):
if os.environ[EMULATED].lower() == 'false':
self.is_emulated = False
else:
self.is_emulated = True
else:
self.is_emulated = False
#get account_name and account_key. If they are not set when constructing,
#use the default development storage account and key when the app runs in
#the emulator; otherwise read them from the environment variables.
if not account_name or not account_key:
if self.is_emulated:
self.account_name = DEV_ACCOUNT_NAME
self.account_key = DEV_ACCOUNT_KEY
self.use_local_storage = True
else:
if os.environ.has_key(AZURE_STORAGE_ACCOUNT):
self.account_name = os.environ[AZURE_STORAGE_ACCOUNT]
if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
self.account_key = os.environ[AZURE_STORAGE_ACCESS_KEY]
else:
self.account_name = account_name
self.account_key = account_key
if not self.account_name or not self.account_key:
raise WindowsAzureError(azure._ERROR_STORAGE_MISSING_INFO)
self.x_ms_version = X_MS_VERSION
self._httpclient = _HTTPClient(service_instance=self, account_key=account_key, account_name=account_name, x_ms_version=self.x_ms_version, protocol=protocol)
self._batchclient = None
self._filter = self._perform_request_worker
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any post-processing
on the response.'''
res = type(self)(self.account_name, self.account_key, self.protocol)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
def _perform_request_worker(self, request):
return self._httpclient.perform_request(request)
def _perform_request(self, request):
''' Sends the request and returns the response. Catches HTTPError and hands it to the error handler. '''
try:
if self._batchclient is not None:
return self._batchclient.insert_request_to_batch(request)
else:
resp = self._filter(request)
except HTTPError as e:
_storage_error_handler(e)
if not resp:
return None
return resp
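The with_filter hook above takes a callable that receives the HTTPRequest and the next filter in the chain; a minimal logging filter might look like the sketch below (BlobService import per the README, credentials as placeholders).

```Python
from azure.storage import BlobService

def logging_filter(request, next_filter):
    # Log the outgoing request, then pass it to the next filter
    # (ultimately the HTTP client).
    print('%s %s' % (request.method, request.path))
    return next_filter(request)

blob_service = BlobService(account_name, account_key).with_filter(logging_filter)
```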


@ -0,0 +1,378 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import urllib2
from azure.storage import *
from azure.storage.storageclient import _StorageClient
from azure.storage import (_update_storage_table_header,
convert_table_to_xml, _convert_xml_to_table,
convert_entity_to_xml, _convert_response_to_entity,
_convert_xml_to_entity, _sign_storage_table_request)
from azure.http.batchclient import _BatchClient
from azure.http import HTTPRequest
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
_dont_fail_on_exist, _dont_fail_not_exist,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
_get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class TableService(_StorageClient):
'''
This is the main class managing Table resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
'''
def begin_batch(self):
if self._batchclient is None:
self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)
return self._batchclient.begin_batch()
def commit_batch(self):
try:
ret = self._batchclient.commit_batch()
finally:
self._batchclient = None
return ret
def cancel_batch(self):
self._batchclient = None
def get_table_service_properties(self):
'''
Gets the properties of a storage account's Table service, including Windows Azure
Storage Analytics.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def set_table_service_properties(self, storage_service_properties):
'''
Sets the properties of a storage account's Table Service, including Windows Azure Storage Analytics.
storage_service_properties: a StorageServiceProperties object.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/?restype=service&comp=properties'
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def query_tables(self, table_name = None, top=None):
'''
Returns a list of tables under the specified account.
table_name: optional, the specific table to query
top: the maximum number of tables to return
'''
request = HTTPRequest()
request.method = 'GET'
request.host = _get_table_host(self.account_name, self.use_local_storage)
if table_name is not None:
uri_part_table_name = "('" + table_name + "')"
else:
uri_part_table_name = ""
request.path = '/Tables' + uri_part_table_name + ''
request.query = [('$top', _int_or_none(top))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_table)
def create_table(self, table, fail_on_exist=False):
'''
Creates a new table in the storage account.
table: name of the table to create.
fail_on_exist: specify whether to throw an exception when the table exists.
'''
_validate_not_none('table', table)
request = HTTPRequest()
request.method = 'POST'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/Tables'
request.body = _get_request_body(convert_table_to_xml(table))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
self._perform_request(request)
return True
def delete_table(self, table_name, fail_not_exist=False):
'''
Deletes the specified table and any data it contains.
table_name: name of the table to delete.
fail_not_exist: specify whether to throw an exception when the table doesn't exist.
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/Tables(\'' + str(table_name) + '\')'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as e:
_dont_fail_not_exist(e)
return False
else:
self._perform_request(request)
return True
def get_entity(self, table_name, partition_key, row_key, select=''):
'''
Get an entity in a table; includes the $select options.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
select: the property names to select.
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('select', select)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')?$select=' + str(select) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_entity(response)
def query_entities(self, table_name, filter=None, select=None, top=None):
'''
Get entities in a table; includes the $filter and $select options.
table_name: the table to query
filter: a filter as described at http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
select: the property names to select from the entities
top: the maximum number of entities to return
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '()'
request.query = [
('$filter', _str_or_none(filter)),
('$select', _str_or_none(select)),
('$top', _int_or_none(top))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_entity)
def insert_entity(self, table_name, entity, content_type='application/atom+xml'):
'''
Inserts a new entity into a table.
entity: Required. The entity object to insert. Can be a dict or an entity object.
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'POST'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + ''
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def update_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity in a table. The Update Entity operation replaces the entire
entity and can be used to remove properties.
entity: Required. The entity object to insert. Can be a dict or an entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity by updating the entity's properties. This operation does
not replace the existing entity as the Update Entity operation does.
entity: Required. The entity object to insert. Can be a dict or an entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def delete_entity(self, table_name, partition_key, row_key, content_type='application/atom+xml', if_match='*'):
'''
Deletes an existing entity in a table.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
if_match: Required. Specifies the condition for which the delete should be performed.
To force an unconditional delete, set If-Match to the wildcard character (*).
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('content_type', content_type)
_validate_not_none('if_match', if_match)
request = HTTPRequest()
request.method = 'DELETE'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
'''
Replaces an existing entity or inserts a new entity if it does not exist in the table.
Because this operation can insert or update an entity, it is also known as an "upsert"
operation.
entity: Required. The entity object to insert. Can be a dict or an entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
Merges an existing entity or inserts a new entity if it does not exist in the table.
Because this operation can insert or update an entity, it is also known as an "upsert"
operation.
entity: Required. The entity object to insert. Can be a dict or an entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
Content-Type: this is required and has to be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
request.host = _get_table_host(self.account_name, self.use_local_storage)
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
def _perform_request_worker(self, request):
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
return self._httpclient.perform_request(request)
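A sketch of the batch support above (begin_batch/commit_batch) combined with insert_entity and query_entities; the entity dicts and the TableService import follow the README's examples, and the credentials are placeholders.

```Python
from azure.storage import TableService

table_service = TableService(account_name, account_key)
table_service.create_table('tasktable')

# Queue several inserts and send them as a single entity group transaction;
# entities in one batch must share a PartitionKey.
table_service.begin_batch()
for i in range(3):
    table_service.insert_entity('tasktable', {
        'PartitionKey': 'tasksSeattle',
        'RowKey': str(i),
        'Description': 'Task %d' % i
    })
table_service.commit_batch()

entities = table_service.query_entities(
    'tasktable', filter="PartitionKey eq 'tasksSeattle'", top=10)
```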

src/build.bat Normal file

@ -0,0 +1,16 @@
@echo OFF
REM----------------------------------------------------------------------------
REM Copyright (c) Microsoft Corporation.
REM
REM This source code is subject to terms and conditions of the Apache License,
REM Version 2.0. A copy of the license can be found in the License.html file at
REM the root of this distribution. If you cannot locate the Apache License,
REM Version 2.0, please send an email to vspython@microsoft.com. By using this
REM source code in any fashion, you are agreeing to be bound by the terms of the
REM Apache License, Version 2.0.
REM
REM You must not remove this notice, or any other, from this software.
REM----------------------------------------------------------------------------
cls
%SystemDrive%\Python27\python.exe setup.py sdist


@ -0,0 +1,556 @@
[class]
BlobService
[x-ms-version]
2011-08-18
[class-comment]
This is the main class managing Blob resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
[init]
account_name
account_key
[method]
list_containers
[comment]
The List Containers operation returns a list of the containers under the specified account.
prefix: Optional. Filters the results to return only containers whose names begin with
the specified prefix.
marker: Optional. A string value that identifies the portion of the list to be returned
with the next list operation.
maxresults: Optional. Specifies the maximum number of containers to return.
include: Optional. Include this parameter to specify that the container's metadata be
returned as part of the response body.
[return]
ContainerEnumResults
[url]
GET http://<account-name>.blob.core.windows.net/?comp=list
[query]
prefix=
marker=
maxresults=
include=
[method]
create_container
[params]
fail_on_exist=False
[comment]
Creates a new container under the specified account. If the container with the same name
already exists, the operation fails.
x_ms_meta_name_values: Optional. A dict with name_value pairs to associate with the
container as metadata. Example:{'Category':'test'}
x_ms_blob_public_access: Optional. Possible values include: container, blob.
fail_on_exist: specify whether to throw an exception when the container exists.
[return]
None
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>?restype=container
[requestheader]
x-ms-meta-name-values=
x-ms-blob-public-access=
[method]
get_container_properties
[comment]
Returns all user-defined metadata and system properties for the specified container.
[return]
dict
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>?restype=container
[method]
get_container_metadata
[comment]
Returns all user-defined metadata for the specified container. The metadata is
returned in the dictionary under 'x-ms-meta-(name)' keys.
[return]
dict
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>?restype=container&comp=metadata
[method]
set_container_metadata
[comment]
Sets one or more user-defined name-value pairs for the specified container.
x_ms_meta_name_values: A dict containing name, value for metadata. Example: {'category':'test'}
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>?restype=container&comp=metadata
[requestheader]
x-ms-meta-name-values=
[method]
get_container_acl
[comment]
Gets the permissions for the specified container.
[return]
SignedIdentifiers
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>?restype=container&comp=acl
[method]
set_container_acl
[comment]
Sets the permissions for the specified container.
x_ms_blob_public_access: Optional. Possible values include 'container' and 'blob'.
signed_identifiers: SignedIdentifiers instance
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>?restype=container&comp=acl
[requestheader]
x-ms-blob-public-access=
[requestbody]
class:signed_identifiers;
[method]
delete_container
[params]
fail_not_exist=False
[comment]
Marks the specified container for deletion.
fail_not_exist: specify whether to throw an exception when the container doesn't exist.
[return]
None
[url]
DELETE http://<account-name>.blob.core.windows.net/<container-name>?restype=container
[method]
list_blobs
[comment]
Returns the list of blobs under the specified container.
[return]
BlobEnumResults
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>?restype=container&comp=list
[query]
prefix=
marker=
maxresults=
include=
[method]
set_blob_service_properties
[comment]
Sets the properties of a storage account's Blob service, including Windows Azure
Storage Analytics. You can also use this operation to set the default request
version for all incoming requests that do not have a version specified.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/?restype=service&comp=properties
[query]
timeout=
[requestbody]
class:storage_service_properties;required
[method]
get_blob_service_properties
[comment]
Gets the properties of a storage account's Blob service, including Windows Azure
Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30.
[return]
StorageServiceProperties
[url]
GET http://<account-name>.blob.core.windows.net/?restype=service&comp=properties
[query]
timeout=
[method]
get_blob_properties
[comment]
Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
x_ms_lease_id: Required if the blob has an active lease.
[return]
dict
[url]
HEAD http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>
[requestheader]
x-ms-lease-id=
[method]
set_blob_properties
[comment]
Sets system properties on the blob.
x_ms_blob_cache_control: Optional. Modifies the cache control string for the blob.
x_ms_blob_content_type: Optional. Sets the blob's content type.
x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
x_ms_blob_content_language: Optional. Sets the blob's content language.
x_ms_lease_id: Required if the blob has an active lease.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=properties
[requestheader]
x-ms-blob-cache-control=
x-ms-blob-content-type=
x-ms-blob-content-md5=
x-ms-blob-content-encoding=
x-ms-blob-content-language=
x-ms-lease-id=
[method]
put_blob
[comment]
Creates a new block blob or page blob, or updates the content of an existing block blob.
container_name: the name of container to put the blob
blob_name: the name of blob
x_ms_blob_type: Required. Could be BlockBlob or PageBlob
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
blob: the content of blob.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>
[requestheader]
x-ms-blob-type=;required
Content-Encoding=
Content-Language=
Content-MD5=
Cache-Control=
x-ms-blob-content-type=
x-ms-blob-content-encoding=
x-ms-blob-content-language=
x-ms-blob-content-md5=
x-ms-blob-cache-control=
x-ms-meta-name-values=;
x-ms-lease-id=
x-ms-blob-content-length=
x-ms-blob-sequence-number=
[requestbody]
binary:blob;required
[method]
get_blob
[comment]
Reads or downloads a blob from the system, including its metadata and properties.
container_name: the name of container to get the blob
blob_name: the name of blob
x_ms_range: Optional. Return only the bytes of the blob in the specified range.
[return]
str
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>
[query]
snapshot=
[requestheader]
x-ms-range=
x-ms-lease-id=
x-ms-range-get-content-md5=
[method]
get_blob_metadata
[comment]
Returns all user-defined metadata for the specified blob or snapshot.
container_name: the name of container containing the blob.
blob_name: the name of blob to get metadata.
[return]
dict
prefix='x-ms-meta'
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=metadata
[query]
snapshot=
[requestheader]
x-ms-lease-id=
[method]
set_blob_metadata
[comment]
Sets user-defined metadata for the specified blob as one or more name-value pairs.
container_name: the name of container containing the blob
blob_name: the name of blob
x_ms_meta_name_values: Dict containing name and value pairs.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=metadata
[requestheader]
x-ms-meta-name-values=
x-ms-lease-id=
[method]
lease_blob
[comment]
Establishes and manages a one-minute lock on a blob for write operations.
container_name: the name of container.
blob_name: the name of blob
x_ms_lease_id: Any GUID format string
x_ms_lease_action: Required. Possible values: acquire|renew|release|break
[return]
dict
filter=['x-ms-lease-id']
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=lease
[requestheader]
x-ms-lease-id=
x-ms-lease-action=;required:acquire|renew|release|break
[method]
snapshot_blob
[comment]
Creates a read-only snapshot of a blob.
container_name: the name of container.
blob_name: the name of blob
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
if_modified_since: Optional. Datetime string.
if_unmodified_since: DateTime string.
if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
if_none_match: Optional. An ETag value
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=snapshot
[query]
[requestheader]
x-ms-meta-name-values=
If-Modified-Since=
If-Unmodified-Since=
If-Match=
If-None-Match=
x-ms-lease-id=
[requestbody]
[method]
copy_blob
[comment]
Copies a blob to a destination within the storage account.
container_name: the name of container.
blob_name: the name of blob
x_ms_copy_source: the blob to be copied. Should be absolute path format.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
x_ms_source_if_modified_since: Optional. A DateTime value. Specify this conditional
    header to copy the source blob only if it has been modified since the specified date/time.
x_ms_source_if_unmodified_since: Optional. A DateTime value. Specify this conditional
    header to copy the source blob only if it has not been modified since the specified date/time.
x_ms_source_if_match: Optional. An ETag value. Specify this conditional header to copy
    the source blob only if its ETag matches the value specified.
x_ms_source_if_none_match: Optional. An ETag value. Specify this conditional header to
    copy the source blob only if its ETag does not match the value specified.
if_modified_since: Optional. Datetime string.
if_unmodified_since: Optional. DateTime string.
if_match: Optional. An ETag value. Copy the blob only if its ETag matches the value specified.
if_none_match: Optional. An ETag value.
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>
[query]
[requestheader]
x-ms-copy-source=;required
x-ms-meta-name-values=;# a dict containing name, value for metadata.
x-ms-source-if-modified-since=
x-ms-source-if-unmodified-since=
x-ms-source-if-match=
x-ms-source-if-none-match=
If-Modified-Since=
If-Unmodified-Since=
If-Match=
If-None-Match=
x-ms-lease-id=
x-ms-source-lease-id=
[requestbody]
[method]
delete_blob
[comment]
Marks the specified blob or snapshot for deletion. The blob is later deleted
during garbage collection.
To mark a specific snapshot for deletion provide the date/time of the snapshot via
the snapshot parameter.
container_name: the name of container.
blob_name: the name of blob
x_ms_lease_id: Optional. If this header is specified, the operation will be performed
only if both of the following conditions are met.
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
[return]
[url]
DELETE http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>
[query]
snapshot=
[requestheader]
x-ms-lease-id=
[requestbody]
[method]
put_block
[comment]
Creates a new block to be committed as part of a blob.
container_name: the name of the container.
blob_name: the name of the blob
content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
the integrity of the blob during transport. When this header is specified,
the storage service checks the hash that has arrived with the one that was sent.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
a blob with an active lease, specify the valid lease ID for this header.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=block
[query]
blockid=;required:base64
[requestheader]
Content-MD5=
x-ms-lease-id=
[requestbody]
binary:block;required
[method]
put_block_list
[comment]
Writes a blob by specifying the list of block IDs that make up the blob. In order to
be written as part of a blob, a block must have been successfully written to the server
in a prior Put Block (REST API) operation.
container_name: the name of container.
blob_name: the name of blob
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
x_ms_blob_cache_control: Optional. Sets the blob's cache control. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_type: Optional. Sets the blob's content type. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. If specified,
this property is stored with the blob and returned with a read request.
x_ms_blob_content_language: Optional. Set the blob's content language. If specified,
this property is stored with the blob and returned with a read request.
x_ms_blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash
is not validated, as the hashes for the individual blocks were validated when
each was uploaded.
content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
the integrity of the blob during transport. When this header is specified,
the storage service checks the hash that has arrived with the one that was sent.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
a blob with an active lease, specify the valid lease ID for this header.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=blocklist
[requestheader]
Content-MD5=
x-ms-blob-cache-control=
x-ms-blob-content-type=
x-ms-blob-content-encoding=
x-ms-blob-content-language=
x-ms-blob-content-md5=
x-ms-meta-name-values=;# a dict containing name, value for metadata.
x-ms-lease-id=
[requestbody]
class:block_list;required
[method]
get_block_list
[comment]
Retrieves the list of blocks that have been uploaded as part of a block blob.
container_name: the name of container.
blob_name: the name of blob
snapshot: Optional. Datetime to determine the time to retrieve the blocks.
blocklisttype: Specifies whether to return the list of committed blocks, the
list of uncommitted blocks, or both lists together. Valid values are
committed, uncommitted, or all.
[return]
BlobBlockList
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=blocklist
[query]
snapshot=
blocklisttype=
[requestheader]
x-ms-lease-id=
[method]
put_page
[comment]
Writes a range of pages to a page blob.
container_name: the name of container.
blob_name: the name of blob
timeout: the timeout parameter is expressed in seconds.
x_ms_range: Required. Specifies the range of bytes to be written as a page. Both the start
and end of the range must be specified. Must be in format: bytes=startByte-endByte.
Given that pages must be aligned with 512-byte boundaries, the start offset must be
a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
byte ranges are 0-511, 512-1023, etc.
x_ms_page_write: Required. You may specify one of the following options :
1. update(lower case): Writes the bytes specified by the request body into the specified
range. The Range and Content-Length headers must match to perform the update.
2. clear(lower case): Clears the specified range and releases the space used in storage
for that range. To clear a range, set the Content-Length header to zero, and the Range
header to a value that indicates the range to clear, up to maximum blob size.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
with an active lease, specify the valid lease ID for this header.
[return]
[url]
PUT http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=page
[requestheader]
x-ms-range=;required
Content-MD5=
x-ms-page-write=;required:update|clear
x-ms-lease-id=
x-ms-if-sequence-number-lte=
x-ms-if-sequence-number-lt=
x-ms-if-sequence-number-eq=
If-Modified-Since=
If-Unmodified-Since=
If-Match=
If-None-Match=
[query]
timeout=
[requestbody]
binary:page;required
[method]
get_page_ranges
[comment]
Retrieves the page ranges for a blob.
container_name: the name of container.
blob_name: the name of blob
x_ms_range: Optional. Specifies the range of bytes over which to list page ranges. Both the start
and end of the range must be specified. Must be in format: bytes=startByte-endByte.
Given that pages must be aligned with 512-byte boundaries, the start offset must be
a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
byte ranges are 0-511, 512-1023, etc.
x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
with an active lease, specify the valid lease ID for this header.
[return]
PageList
[url]
GET http://<account-name>.blob.core.windows.net/<container-name>/<blob-name>?comp=pagelist
[query]
snapshot=
[requestheader]
Range=
x-ms-range=
x-ms-lease-id=
[end]
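Reading the page ranges back through the generated method could look like this sketch; the container and blob names are placeholders. The `page_ranges` attribute is grounded in the generator below, which parses the response with `_parse_simple_list(response, PageList, PageRange, "page_ranges")`.

```Python
from azure.storage.blobservice import BlobService

blob_service = BlobService('myaccount', 'mykey')   # placeholder credentials
ranges = blob_service.get_page_ranges('taskcontainer', 'taskpageblob')
# The PageList carries its PageRange items in 'page_ranges'.
for page_range in ranges.page_ranges:
    print(page_range)
```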


@ -0,0 +1,705 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
# To Run: C:\Python27\python.exe codegenerator.py
# It expects the source files to live in ..\azure\...
from xml.dom import minidom
import urllib2
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
def to_legalname(name):
"""Converts the name of a header value into a value which is a valid Python
attribute name."""
if name == 'IncludeAPIs':
return 'include_apis'
if name[0] == '$':
return name[1:]
name = name.split('=')[0]
if ':' in name:
name = name.split(':')[1]
name = name.replace('-', '_')
legalname = name[0]
for ch in name[1:]:
if ch.isupper():
legalname += '_'
legalname += ch
legalname = legalname.replace('__', '_').replace('_m_d5', '_md5')
return legalname.lower()
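A few illustrative conversions performed by the helper above (a quick sanity-check sketch, not part of this file; it can be run after the definition):

```Python
assert to_legalname('x-ms-meta-name-values') == 'x_ms_meta_name_values'
assert to_legalname('IncludeAPIs') == 'include_apis'
assert to_legalname('$top') == 'top'
assert to_legalname('class:block_list') == 'block_list'
assert to_legalname('maxresults=') == 'maxresults'
```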
def normalize_xml(xmlstr):
if xmlstr:
xmlstr = '>'.join(xml.strip() for xml in xmlstr.split('>'))
xmlstr = '<'.join(xml.strip() for xml in xmlstr.split('<'))
return xmlstr
def to_multilines(statements):
ret = statements.replace('\n', ' \\\n').strip()
if ret.endswith(' \\'):
ret = ret[:-2]
return ret
def get_output_str(name, value, validate_string):
name = to_legalname(name)
if value:
return ''.join([name, '=\'', value, '\''])
elif 'required' in validate_string:
return name
else:
return name + '=None'
def get_value_validates_comment(value_string):
    # Splits a "value;validates#comment" spec string into its three parts.
    value = ''
    validate_string = ''
    comment = ''
    if ';' in value_string:
        value, value_string = value_string.split(';')[:2]
    if '#' in value_string:
        validate_string, comment = value_string.split('#')[:2]
    else:
        validate_string = value_string
    return value, validate_string, comment
def output_import(output_file, class_name):
indent = ' '
output_str = 'import base64\n'
output_str += 'import os\n'
output_str += 'import urllib2\n\n'
if 'ServiceBus' in class_name:
output_str += 'from azure.http.httpclient import _HTTPClient\n'
output_str += 'from azure.http import HTTPError\n'
output_str += 'from azure.servicebus import (_update_service_bus_header, _create_message, \n'
output_str += indent*8 + 'convert_topic_to_xml, _convert_response_to_topic, \n'
output_str += indent*8 + 'convert_queue_to_xml, _convert_response_to_queue, \n'
output_str += indent*8 + 'convert_subscription_to_xml, _convert_response_to_subscription, \n'
output_str += indent*8 + 'convert_rule_to_xml, _convert_response_to_rule, \n'
output_str += indent*8 + '_convert_xml_to_queue, _convert_xml_to_topic, \n'
output_str += indent*8 + '_convert_xml_to_subscription, _convert_xml_to_rule,\n'
output_str += indent*8 + '_service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE, \n'
output_str += indent*8 + 'AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)\n'
else:
output_str += 'from azure.storage import *\n'
output_str += 'from azure.storage.storageclient import _StorageClient\n'
if 'Blob' in class_name:
output_str += 'from azure.storage import (_update_storage_blob_header,\n'
output_str += indent*8 + 'convert_block_list_to_xml, convert_response_to_block_list) \n'
elif 'Queue' in class_name:
output_str += 'from azure.storage import (_update_storage_queue_header)\n'
else:
output_str += 'from azure.storage import (_update_storage_table_header, \n'
output_str += indent*8 + 'convert_table_to_xml, _convert_xml_to_table,\n'
output_str += indent*8 + 'convert_entity_to_xml, _convert_response_to_entity, \n'
output_str += indent*8 + '_convert_xml_to_entity, _sign_storage_table_request)\n'
if 'Table' in class_name:
output_str += 'from azure.http.batchclient import _BatchClient\n'
output_str += 'from azure.http import HTTPRequest\n'
output_str += 'from azure import (_validate_not_none, Feed,\n'
output_str += indent*8 + '_convert_response_to_feeds, _str_or_none, _int_or_none,\n'
output_str += indent*8 + '_get_request_body, _update_request_uri_query, \n'
output_str += indent*8 + '_dont_fail_on_exist, _dont_fail_not_exist, \n'
output_str += indent*8 + 'WindowsAzureError, _parse_response, _convert_class_to_xml, \n'
output_str += indent*8 + '_parse_response_for_dict, _parse_response_for_dict_prefix, \n'
output_str += indent*8 + '_parse_response_for_dict_filter, \n'
output_str += indent*8 + '_parse_enum_results_list, _update_request_uri_query_local_storage, \n'
output_str += indent*8 + '_get_table_host, _get_queue_host, _get_blob_host, \n'
output_str += indent*8 + '_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape) \n\n'
output_file.write(output_str)
def output_class(output_file, class_name, class_comment, class_init_params, x_ms_version):
indent = ' '
if 'ServiceBus' in class_name:
output_str = ''.join(['class ', class_name, ':\n'])
else:
output_str = ''.join(['class ', class_name, '(_StorageClient):\n'])
if class_comment.strip():
output_str += ''.join([indent, '\'\'\'\n', indent, class_comment.strip(), '\n', indent, '\'\'\'\n\n'])
else:
output_str += '\n'
if 'Table' in class_name:
output_str += ''.join([indent, 'def begin_batch(self):\n'])
output_str += indent*2 + 'if self._batchclient is None:\n'
output_str += indent*3 + 'self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)\n'
output_str += ''.join([indent*2, 'return self._batchclient.begin_batch()\n\n'])
output_str += ''.join([indent, 'def commit_batch(self):\n'])
output_str += ''.join([indent*2, 'try:\n'])
output_str += ''.join([indent*3, 'ret = self._batchclient.commit_batch()\n'])
output_str += ''.join([indent*2, 'finally:\n'])
output_str += indent*3 + 'self._batchclient = None\n'
output_str += ''.join([indent*2, 'return ret\n\n'])
output_str += ''.join([indent, 'def cancel_batch(self):\n'])
output_str += indent*2 + 'self._batchclient = None\n\n'
if not 'ServiceBus' in class_name:
output_file.write(output_str)
return
if not 'service_namespace' in class_init_params:
output_str += ''.join([indent, 'def begin_batch(self):\n'])
output_str += ''.join([indent*2, 'self._httpclient.begin_batch()\n\n'])
output_str += ''.join([indent, 'def commit_batch(self):\n'])
output_str += ''.join([indent*2, 'self._httpclient.commit_batch()\n\n'])
output_str += ''.join([indent, 'def cancel_batch(self):\n'])
output_str += ''.join([indent*2, 'self._httpclient.cancel_batch()\n\n'])
output_file.write(output_str)
def output_method_def(method_name, method_params, uri_param, req_param, req_query, req_header):
indent = ' '
output_def = ''.join([indent, 'def ', method_name, '(self, '])
for param in uri_param:
output_def += param.build_sig()
params = req_param + req_query + req_header
ordered_params = []
for name, value, validate_string, comment in params:
if 'required' in validate_string:
ordered_params.append((name, value, validate_string, comment))
for name, value, validate_string, comment in params:
if 'required' not in validate_string:
ordered_params.append((name, value, validate_string, comment))
output_def += ', '.join(get_output_str(name, value, validate_string) for name, value, validate_string, comment in ordered_params)
if output_def.endswith(', '):
output_def = output_def[:-2]
for name, value in method_params:
output_def += ''.join([', ', name, '=', value])
output_def += '):\n'
return output_def
def output_method_comments(method_comment, req_param, req_query, req_header):
indent = ' '
output_comments = ''
if method_comment.strip():
output_comments += method_comment
for name, value, validate_string, comment in (req_param + req_query + req_header):
if comment:
output_comments += ''.join([indent*2, name, ': ', comment.rstrip(), '\n'])
if output_comments.strip():
output_comments = ''.join([indent*2, '\'\'\'\n', output_comments.rstrip(), '\n', indent*2, '\'\'\'\n'])
return output_comments
def output_method_validates(uri_param, req_param, req_query, req_header):
indent = ' '
output_validates = ''
for param in uri_param:
output_validates += param.get_validation(indent)
for name, value, validate_string, comment in (req_param + req_query + req_header):
if not validate_string.strip():
continue
validates = validate_string.split(':')
for validate in validates:
if 'required' in validate:
output_validates += ''.join([indent*2, '_validate_not_none(\'', to_legalname(name), '\', ', to_legalname(name), ')\n'])
return output_validates
HEADER_CONVERSION = {'x-ms-meta-name-values': '%s',
}
QUERY_CONVERSION = {'maxresults' : '_int_or_none(%s)',
'timeout' : '_int_or_none(%s)',
'$top': '_int_or_none(%s)',}
def output_headers(list_name, request_list):
return output_list(list_name, request_list, HEADER_CONVERSION)
def output_query(list_name, request_list):
return output_list(list_name, request_list, QUERY_CONVERSION)
def output_list(list_name, request_list, validate_conversions):
indent = ' '
output_list_str = ''
if len(request_list) == 1:
output_list_str += ''.join([indent*2, list_name, ' = [('])
for name, value, validate_string, comment in request_list:
validated = validate_conversions.get(name, '_str_or_none(%s)') % (to_legalname(name), )
if 'base64' in validate_string:
output_list_str += ''.join(['\'', name, '\', base64.b64encode(', validated, '), '])
else:
output_list_str += ''.join(['\'', name, '\', ', validated, ', '])
output_list_str = ''.join([output_list_str[:-2], ')]\n'])
elif len(request_list) > 1:
output_list_str += ''.join([indent*2, list_name, ' = [\n'])
for name, value, validate_string, comment in request_list:
validated = validate_conversions.get(name, '_str_or_none(%s)') % (to_legalname(name), )
if 'base64' in validate_string:
output_list_str += ''.join([indent*3, '(\'', name, '\', base64.b64encode(', validated, ')),\n'])
else:
output_list_str += ''.join([indent*3, '(\'', name, '\', ', validated, '),\n'])
output_list_str = ''.join([output_list_str[:-2], '\n', indent*3, ']\n'])
return output_list_str
def output_method_body(return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
indent = ' '
output_body = ''.join([indent*2, 'request = HTTPRequest()\n'])
output_body += ''.join([indent*2, 'request.method = \'', req_method, '\'\n'])
if BLOB_SERVICE_HOST_BASE in req_host:
output_body += indent*2 + 'request.host = _get_blob_host(self.account_name, self.use_local_storage)\n'
elif QUEUE_SERVICE_HOST_BASE in req_host:
output_body += indent*2 + 'request.host = _get_queue_host(self.account_name, self.use_local_storage)\n'
elif TABLE_SERVICE_HOST_BASE in req_host:
output_body += indent*2 + 'request.host = _get_table_host(self.account_name, self.use_local_storage)\n'
else:
output_body += indent*2 + 'request.host = self.service_namespace + SERVICE_BUS_HOST_BASE\n'
req_uri = req_uri.replace('<subscription-id>', '\' + self.subscription_id + \'')
for param in uri_param:
req_uri, extra = param.build_uri(req_uri, 2)
if extra:
output_body += extra
output_body += ''.join([indent*2, 'request.path = \'', req_uri, '\'\n'])
output_body += output_headers('request.headers', req_header)
output_body += output_query('request.query', req_query)
for name, value, validate_string, comment in req_param:
if name.startswith('feed:'):
type = name.split(':')[1]
output_body += ''.join([indent*2, 'request.body = _get_request_body(convert_' + type + '_to_xml(', to_legalname(name), '))\n'])
break
elif name.startswith('class:'):
if 'block_list' in name:
output_body += ''.join([indent*2, 'request.body = _get_request_body(convert_block_list_to_xml(', to_legalname(name), '))\n'])
else:
output_body += ''.join([indent*2, 'request.body = _get_request_body(_convert_class_to_xml(', to_legalname(name), '))\n'])
break
elif name.startswith('binary:'):
if 'message' in name:
output_body += indent*2 + 'request.headers = message.add_headers(request)\n'
output_body += ''.join([indent*2, 'request.body = _get_request_body(', to_legalname(name), '.body)\n'])
else:
output_body += ''.join([indent*2, 'request.body = _get_request_body(', to_legalname(name), ')\n'])
break
else:
fromstr = ''.join([validate_string, '</', name, '>'])
if value and comment:
fromstr = ''.join([value, ';', validate_string, '#', comment])
elif value:
fromstr = ''.join([value, ';', validate_string])
elif comment:
fromstr = ''.join([validate_string, '#', comment])
tostr = ''.join(['\'', ' + xml_escape(str(', to_legalname(name), ')) + ', '\'</', name, '>'])
req_body = req_body.replace(fromstr, tostr)
if len(req_body.strip()) > 80:
output_body += ''.join([indent*2, 'request.body = _get_request_body(\'', to_multilines(req_body.strip()), '\')\n'])
elif req_body.strip():
output_body += ''.join([indent*2, 'request.body = _get_request_body(\'', req_body.strip(), '\')\n'])
if SERVICE_BUS_HOST_BASE in req_host:
output_body += indent*2 + 'request.path, request.query = _update_request_uri_query(request)\n'
else:
output_body += indent*2 + 'request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)\n'
if 'servicebus' in req_host:
output_body += indent*2 + 'request.headers = _update_service_bus_header(request, self.account_key, self.issuer)\n'
elif 'table.core.windows.net' in req_host:
output_body += indent*2 + 'request.headers = _update_storage_table_header(request)\n'
elif 'blob.core.windows.net' in req_host:
output_body += indent*2 + 'request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)\n'
elif 'queue.core.windows.net' in req_host:
output_body += indent*2 + 'request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)\n'
for name, value in method_params:
if 'fail_on_exist' in name:
output_body += indent*2 + 'if not ' + name + ':\n'
output_body += indent*3 + 'try:\n'
output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
output_body += ''.join([indent*4, 'return True\n'])
output_body += indent*3 + 'except WindowsAzureError as e:\n'
output_body += indent*4 + '_dont_fail_on_exist(e)\n'
output_body += indent*4 + 'return False\n'
output_body += indent*2 + 'else:\n'
output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
output_body += ''.join([indent*3, 'return True\n\n'])
break
elif 'fail_not_exist' in name:
output_body += indent*2 + 'if not ' + name + ':\n'
output_body += indent*3 + 'try:\n'
output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
output_body += ''.join([indent*4, 'return True\n'])
output_body += indent*3 + 'except WindowsAzureError as e:\n'
output_body += indent*4 + '_dont_fail_not_exist(e)\n'
output_body += indent*4 + 'return False\n'
output_body += indent*2 + 'else:\n'
output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
output_body += ''.join([indent*3, 'return True\n\n'])
break
else:
output_body += ''.join([indent*2, 'response = self._perform_request(request)\n\n'])
if return_type and return_type != 'None':
if return_type.startswith('dict'):
return_params = return_type.split('\n')
if len(return_params) == 1:
output_body += indent*2 + 'return _parse_response_for_dict(response)\n\n'
elif len(return_params) == 2:
value = return_params[1].split('=')[1]
if return_params[1].startswith('prefix'):
output_body += indent*2 + 'return _parse_response_for_dict_prefix(response, prefix=' + value +')\n\n'
elif return_params[1].startswith('filter'):
output_body += indent*2 + 'return _parse_response_for_dict_filter(response, filter=' + value + ')\n\n'
elif return_type.endswith('EnumResults'):
output_body += indent*2 + 'return _parse_enum_results_list(response, ' + return_type + ', "' + return_type[:-11] + 's", ' + return_type[:-11] + ')\n\n'
elif return_type == 'PageList':
output_body += indent*2 + 'return _parse_simple_list(response, PageList, PageRange, "page_ranges")'
else:
if return_type == 'Message':
output_body += indent*2 + 'return _create_message(response, self)\n\n'
elif return_type == 'str':
output_body += indent*2 + 'return response.body\n\n'
elif return_type == 'BlobBlockList':
output_body += indent*2 + 'return convert_response_to_block_list(response)\n\n'
elif 'Feed' in return_type:
for name in ['table', 'entity', 'topic', 'subscription', 'queue', 'rule']:
if name +'\'),' in return_type:
convert_func = '_convert_xml_to_' + name
output_body += indent*2 + 'return _convert_response_to_feeds(response, ' + convert_func + ')\n\n'
break
elif name in return_type:
convert_func = '_convert_response_to_' + name
output_body += indent*2 + 'return ' + convert_func + '(response)\n\n'
break
else:
output_body += indent*2 + 'return _parse_response(response, ' + return_type + ')\n\n'
return output_body
def output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
indent=' '
output_str = ''
output_str += output_method_def(method_name, method_params, uri_param, req_param, req_query, req_header)
output_str += output_method_comments(method_comment, req_param, req_query, req_header)
output_str += output_method_validates(uri_param, req_param, req_query, req_header)
output_str += output_method_body(return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
output_file.write(output_str)
class UriBuilder(object):
def __init__(self, value):
self.uri_str = value
def build_sig(self):
name = self.uri_str
if to_legalname(name) != 'subscription_id':
if '=' in name:
name, value = name.split('=')
return ''.join([to_legalname(name), '=', value, ', '])
else:
return ''.join([to_legalname(name), ', '])
return ''
def build_uri(self, req_uri, indent):
name = self.uri_str
return req_uri.replace('<' + name + '>', '\' + str(' + to_legalname(name) + ') + \''), ''
def get_validation(self, indent):
name = self.uri_str.split('=')[0]
if to_legalname(name) != 'subscription_id':
return ''.join([indent*2, '_validate_not_none(\'', to_legalname(name), '\', ', to_legalname(name), ')\n'])
return ''
class OptionalUriBuilder(object):
def __init__(self, value):
self.value = value
colon = self.value.find(':')
self.name = self.value[1:colon]
self.replacement = self.value[colon+1:].replace('[' + self.name + ']', '" + ' + self.name + ' + "')
def build_sig(self):
return self.name + ' = None, '
def get_validation(self, indent):
return ''
def build_uri(self, req_uri, indent):
extra = ((' ' * indent) + 'if {name} is not None:\n' +
(' ' * (indent+1)) + 'uri_part_{name} = "{replacement}"\n' +
(' ' * indent) + 'else:\n' +
(' ' * (indent+1)) + 'uri_part_{name} = ""\n').format(name=self.name, replacement=self.replacement)
return req_uri.replace('<' + self.value + '>', "' + uri_part_" + self.name + " + '"), extra
def auto_codegen(source_filename, output_filename='output.py'):
source_file = open(source_filename,'r')
output_file = open(output_filename,'w')
return_type = None
indent = ' '
method_name = ''
req_host = ''
req_method = ''
req_uri = ''
req_body = ''
req_query = []
req_header = []
req_param = []
uri_param = []
host_param = ''
class_init_params = []
class_name = ''
x_ms_version = ''
class_comment = ''
method_comment = ''
req_protocol = ''
method_params = []
methods_code = ''
line = source_file.readline().strip().lower()
while True:
if line == '[end]':
break
elif line == '[class]':
if method_name != '':
output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
method_name = ''
class_name = source_file.readline().strip()
elif line == '[x-ms-version]':
x_ms_version = source_file.readline().strip()
elif line == '[class-comment]':
while True:
line = source_file.readline().strip()
if line.startswith('['):
break
else:
class_comment += ''.join([indent, line, '\n'])
continue
elif line == '[init]':
while True:
param_name = source_file.readline().strip()
if param_name.startswith('['):
line = param_name.strip()
break
elif param_name.strip():
class_init_params.append(param_name.strip())
output_import(output_file, class_name)
output_class(output_file, class_name, class_comment, class_init_params, x_ms_version)
class_name = ''
x_ms_version = ''
class_init_params = []
class_comment = ''
continue
elif line == '[methods_code]':
while True:
line = source_file.readline()
if line.startswith('['):
line = line.strip()
break
else:
methods_code += ''.join([indent, line])
continue
elif line == '[method]':
if method_name != '':
output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
req_query = []
req_header = []
req_param = []
req_body = ''
return_type = None
method_comment = ''
method_params = []
method_name = source_file.readline().strip()
elif line == '[params]':
method_params = []
while True:
param = source_file.readline().strip()
if param.startswith('['):
line = param.strip()
break
elif param.strip():
name, value = param.split('=')
method_params.append((name, value))
continue
elif line == '[comment]':
while True:
line = source_file.readline()
if line.startswith('['):
line = line.strip()
break
else:
method_comment += ''.join([indent*2, line])
continue
elif line == '[return]':
return_type = ''
while True:
line = source_file.readline()
if line.startswith('['):
line = line.strip()
break
else:
return_type += line
return_type = return_type.strip()
continue
elif line == '[url]':
url = source_file.readline().strip()
if 'https://' in url:
req_protocol = 'https'
else:
req_protocol = 'http'
req_host = url.split(' ')[1].split('//')[1].split('/')[0]
host_param = ''
if '<' in req_host:
pos1 = req_host.find('<')
pos2 = req_host.find('>')
host_param = req_host[pos1+1:pos2]
req_method = url.split(' ')[0]
req_uri = url[url.find('//')+2:].replace(req_host, '')
uri_param = []
uri_path = req_uri
while '<' in uri_path:
pos1 = uri_path.find('<')
pos2 = uri_path.find('>')
uri_param_name = uri_path[pos1+1:pos2]
if uri_param_name.startswith('?'):
builder = OptionalUriBuilder(uri_param_name)
else:
builder = UriBuilder(uri_param_name)
uri_param.append(builder)
if pos2 < (len(uri_path)-1):
uri_path = uri_path[pos2+1:]
else:
break
elif line == '[query]':
req_query = []
while True:
query = source_file.readline().strip()
if query.startswith('['):
line = query.strip()
break
elif query.strip():
name, value = query.split('=')
validate_string = ''
comment = ''
if '#' in value:
pos = value.rfind('#')
comment = value[pos+1:]
value = value[:pos]
if ';' in value:
value, validate_string = value.split(';')
req_query.append((name, value, validate_string, comment))
continue
elif line == '[requestheader]':
req_header = []
while True:
header = source_file.readline().strip()
if header.startswith('['):
line = header.strip()
break
elif header.strip():
name, value = header.split('=')
validate_string = ''
comment = ''
if '#' in value:
pos = value.rfind('#')
comment = value[pos+1:]
value = value[:pos]
if ';' in value:
value, validate_string = value.split(';')
req_header.append((name, value, validate_string, comment))
continue
elif line == '[requestbody]':
req_body = ''
req_param = []
while True:
body = source_file.readline()
if body.startswith('['):
line = body.strip()
break
elif body.strip():
req_body += body
if req_body.startswith('class:') or req_body.startswith('binary:') or req_body.startswith('feed:'):
name_value_string = req_body.strip()
name = ''
value_string = ''
if ';' in name_value_string:
name, value_string = name_value_string.split(';')
else:
name = name_value_string
value, validate_string, comment = get_value_validates_comment(value_string)
req_param.append((name, value, validate_string, comment))
elif req_body.strip():
newbody = normalize_xml(req_body)
xmldoc = minidom.parseString(newbody)
for xmlelement in xmldoc.childNodes[0].childNodes:
value_string = xmlelement.firstChild.nodeValue
value, validate_string, comment = get_value_validates_comment(value_string)
req_param.append((xmlelement.nodeName, value, validate_string, comment))
continue
line = source_file.readline().strip().lower()
output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
output_file.write('\n' + methods_code)
source_file.close()
output_file.close()
if __name__ == '__main__':
auto_codegen('blob_input.txt', '../azure/storage/blobservice.py')
auto_codegen('table_input.txt', '../azure/storage/tableservice.py')
auto_codegen('queue_input.txt', '../azure/storage/queueservice.py')
auto_codegen('servicebus_input.txt', '../azure/servicebus/servicebusservice.py')
def add_license(license_file_name, output_file_name):
license_file = open(license_file_name, 'r')
output_file = open(output_file_name, 'r')
content = output_file.read()
license_txt = license_file.read()
license_file.close()
output_file.close()
output_file = open(output_file_name, 'w')
output_file.write(license_txt)
output_file.write(content)
output_file.close()
add_license('license.txt', '../azure/storage/blobservice.py')
add_license('license.txt', '../azure/storage/tableservice.py')
add_license('license.txt', '../azure/storage/queueservice.py')
add_license('license.txt', '../azure/servicebus/servicebusservice.py')
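To get a feel for what the generator emits, the fragment below (illustrative only, not part of this file) prints the signature output_method_def derives for the delete_queue entry that appears in queue_input.txt later in this change; the exact indentation of the emitted line depends on the indent constant above.

```Python
# Assumes the definitions above are in scope; the spec values mirror the
# delete_queue entry in queue_input.txt.
sig = output_method_def('delete_queue',
                        [('fail_not_exist', 'False')],   # from [params]
                        [UriBuilder('queue-name')],      # from <queue-name> in the URL
                        [], [], [])                      # no body, query or header params
print(sig)   # roughly "def delete_queue(self, queue_name, fail_not_exist=False):"
```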


@ -0,0 +1,43 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{6ea33d82-ec4a-4e01-ba16-003e66b38e5b}</ProjectGuid>
<ProjectHome>.</ProjectHome>
<StartupFile>codegenerator.py</StartupFile>
<SearchPath>C:\ptvs\Open_Source\Incubation\windowsazure</SearchPath>
<WorkingDirectory>.</WorkingDirectory>
<OutputPath>.</OutputPath>
<Name>codegenerator</Name>
<RootNamespace>codegenerator</RootNamespace>
<SccProjectName>SAK</SccProjectName>
<SccProvider>SAK</SccProvider>
<SccAuxPath>SAK</SccAuxPath>
<SccLocalPath>SAK</SccLocalPath>
<IsWindowsApplication>False</IsWindowsApplication>
<InterpreterId>2af0f10d-7135-4994-9156-5d01c9c11b7e</InterpreterId>
<InterpreterVersion>2.7</InterpreterVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<ItemGroup>
<Compile Include="codegenerator.py" />
</ItemGroup>
<ItemGroup>
<Content Include="blob_input.txt" />
<Content Include="hostedservices_input.txt" />
<Content Include="license.txt" />
<Content Include="queue_input.txt" />
<Content Include="servicebus_input.txt" />
<Content Include="sqlazure_input.txt" />
<Content Include="table_input.txt" />
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
</Project>


@ -0,0 +1,499 @@
[class]
HostedServiceManager
[x-ms-version]
2011-08-18
[init]
cert_file
[method]
list_storage_accounts
[return]
StorageServices
[url]
GET https://management.core.windows.net/<subscription-id>/services/storageservices
[method]
get_storage_account_properties
[return]
StorageService
[url]
GET https://management.core.windows.net/<subscription-id>/services/storageservices/<service-name>
[method]
get_storage_account_keys
[return]
StorageService
[url]
GET https://management.core.windows.net/<subscription-id>/services/storageservices/<service-name>/keys
[method]
regenerate_storage_account_keys
[return]
StorageService
[url]
POST https://management.core.windows.net/<subscription-id>/services/storageservices/<service-name>/keys?action=regenerate
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<RegenerateKeys xmlns="http://schemas.microsoft.com/windowsazure">
<KeyType>Primary|Secondary</KeyType>
</RegenerateKeys>
[method]
create_storage_account
[url]
POST https://management.core.windows.net/<subscription-id>/services/storageservices
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<CreateStorageServiceInput xmlns="http://schemas.microsoft.com/windowsazure">
<ServiceName>service-name</ServiceName>
<Description>service-description</Description>
<Label>base64-encoded-label</Label>
<AffinityGroup>affinity-group-name</AffinityGroup>
<Location>location-of-the-storage-account</Location>
</CreateStorageServiceInput>
[method]
delete_storage_account
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/storageservices/<service-name>
[method]
update_storage_account
[url]
PUT https://management.core.windows.net/<subscription-id>/services/storageservices/<service-name>
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpdateStorageServiceInput xmlns="http://schemas.microsoft.com/windowsazure">
<Description>Description of the storage service</Description>
<Label>base64 encoded label</Label>
</UpdateStorageServiceInput>
[method]
list_hosted_services
[return]
HostedServices
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices
[method]
delete_hosted_service
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>
[method]
update_hosted_service
[url]
PUT https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpdateHostedService xmlns="http://schemas.microsoft.com/windowsazure">
<Label>base64-encoded-service-label</Label>
<Description>description</Description>
</UpdateHostedService>
[method]
create_hosted_service
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<CreateHostedService xmlns="http://schemas.microsoft.com/windowsazure">
<ServiceName>service-name</ServiceName>
<Label>base64-encoded-service-label</Label>
<Description>description</Description>
<Location>location</Location>
<AffinityGroup>affinity-group</AffinityGroup>
</CreateHostedService>
[method]
get_hosted_service_properties
[return]
HostedService
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>
[query]
embed-detail=false
[method]
create_deployment
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot-name>
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<CreateDeployment xmlns="http://schemas.microsoft.com/windowsazure">
<Name>deployment-name</Name>
<PackageUrl>package-url-in-blob-storage</PackageUrl>
<Label>base64-encoded-deployment-label</Label>
<Configuration>base64-encoded-configuration-file</Configuration>
<StartDeployment>true|false</StartDeployment>
<TreatWarningsAsError>true|false</TreatWarningsAsError>
</CreateDeployment>
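As an illustration of how codegenerator.py above would surface this entry (URI parts become positional arguments, subscription-id is taken from self.subscription_id, and each XML element becomes an optional keyword argument), the derived signature would be roughly the sketch below; HostedServiceManager is not actually generated by the __main__ block in this change.

```Python
class HostedServiceManager(object):
    # Sketch of the signature the generator would derive from the spec above;
    # the body is omitted because this class is not produced by this commit.
    def create_deployment(self, service_name, deployment_slot_name,
                          name=None, package_url=None, label=None,
                          configuration=None, start_deployment=None,
                          treat_warnings_as_error=None):
        pass
```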
[method]
get_deployment_by_slot
[return]
Deployment
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>
[method]
get_deployment_by_name
[return]
Deployment
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>
[method]
swap_deployment
[return]
Deployment
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<Swap xmlns="http://schemas.microsoft.com/windowsazure">
<Production>production-deployment-name</Production>
<SourceDeployment>deployment-name-to-be-swapped-with-production</SourceDeployment>
</Swap>
[method]
delete_deployment_by_slot
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>
[method]
delete_deployment_by_name
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>
[method]
change_deployment_configuration_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/?comp=config
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<ChangeConfiguration xmlns="http://schemas.microsoft.com/windowsazure">
<Configuration>base-64-encoded-configuration-file</Configuration>
<TreatWarningsAsError>true|false</TreatWarningsAsError>
<Mode>Auto|Manual</Mode>
</ChangeConfiguration>
[method]
change_deployment_configuration_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/?comp=config
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<ChangeConfiguration xmlns="http://schemas.microsoft.com/windowsazure">
<Configuration>base-64-encoded-configuration-file</Configuration>
<TreatWarningsAsError>true|false</TreatWarningsAsError>
<Mode>Auto|Manual</Mode>
</ChangeConfiguration>
[method]
update_deployment_status_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/?comp=status
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpdateDeploymentStatus xmlns="http://schemas.microsoft.com/windowsazure">
<Status>Running|Suspended</Status>
</UpdateDeploymentStatus>
[method]
update_deployment_status_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/?comp=status
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpdateDeploymentStatus xmlns="http://schemas.microsoft.com/windowsazure">
<Status>Running|Suspended</Status>
</UpdateDeploymentStatus>
[method]
upgrade_deployment_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/?comp=upgrade
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpgradeDeployment xmlns="http://schemas.microsoft.com/windowsazure">
<Mode>auto|manual</Mode>
<PackageUrl>url-to-package</PackageUrl>
<Configuration>base64-encoded-config-file</Configuration>
<Label>base-64-encoded-label</Label>
<RoleToUpgrade>role-name</RoleToUpgrade>
<Force>true|false</Force>
</UpgradeDeployment>
[method]
upgrade_deployment_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/?comp=upgrade
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpgradeDeployment xmlns="http://schemas.microsoft.com/windowsazure">
<Mode>auto|manual</Mode>
<PackageUrl>url-to-package</PackageUrl>
<Configuration>base64-encoded-config-file</Configuration>
<Label>base-64-encoded-label</Label>
<RoleToUpgrade>role-name</RoleToUpgrade>
<Force>true|false</Force>
</UpgradeDeployment>
[method]
walk_upgrade_domain_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/?comp=walkupgradedomain
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<WalkUpgradeDomain xmlns="http://schemas.microsoft.com/windowsazure">
<UpgradeDomain>upgrade-domain-id</UpgradeDomain>
</WalkUpgradeDomain>
[method]
walk_upgrade_domain_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/?comp=walkupgradedomain
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<WalkUpgradeDomain xmlns="http://schemas.microsoft.com/windowsazure">
<UpgradeDomain>upgrade-domain-id</UpgradeDomain>
</WalkUpgradeDomain>
[method]
reboot_role_instance_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/roleinstances/<role-instance-name>?comp=reboot
[requestheader]
Content-Length=0
[method]
reboot_role_instance_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/roleinstances/<role-instance-name>?comp=reboot
[requestheader]
Content-Length=0
[method]
reimage_role_instance_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/roleinstances/<role-instance-name>?comp=reimage
[requestheader]
Content-Length=0
[method]
reimage_role_instance_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/roleinstances/<role-instance-name>?comp=reimage
[requestheader]
Content-Length=0
[method]
rollback_update_by_slot
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deploymentslots/<deployment-slot>/?comp=rollback
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<RollbackUpdateOrUpgrade xmlns="http://schemas.microsoft.com/windowsazure">
<Mode>auto|manual</Mode>
<Force>true|false</Force>
</RollbackUpdateOrUpgrade>
[method]
rollback_update_by_name
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/deployments/<deployment-name>/?comp=rollback
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<RollbackUpdateOrUpgrade xmlns="http://schemas.microsoft.com/windowsazure">
<Mode>auto|manual</Mode>
<Force>true|false</Force>
</RollbackUpdateOrUpgrade>
[method]
list_certificates
[return]
Certificates
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-DNS-name>/certificates
[method]
get_certificate
[return]
Certificate
[url]
GET https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/certificates/<thumbalgorithm-thumbprint>
[method]
add_certificate
[return]
Certificates
[url]
POST https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/certificates
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<CertificateFile xmlns="http://schemas.microsoft.com/windowsazure">
<Data>base64-encoded-pfx-file</Data>
<CertificateFormat>pfx</CertificateFormat>
<Password>pfx-file-password</Password>
</CertificateFile>
[method]
delete_certificate
[return]
Certificates
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/hostedservices/<service-name>/certificates/<thumbprint>
[method]
list_affinity_groups
[return]
AffinityGroups
[url]
GET https://management.core.windows.net/<subscription-id>/affinitygroups
[method]
create_affinity_group
[url]
POST https://management.core.windows.net/<subscription-id>/affinitygroups
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<CreateAffinityGroup xmlns="http://schemas.microsoft.com/windowsazure">
<Name>affinity-group-name</Name>
<Label>base64-encoded-affinity-group-label</Label>
<Description>affinity-group-description</Description>
<Location>location</Location>
</CreateAffinityGroup>
[method]
delete_affinity_group
[return]
AffinityGroups
[url]
DELETE https://management.core.windows.net/<subscription-id>/affinitygroups/<affinity-group-name>
[method]
update_affinity_group
[url]
PUT https://management.core.windows.net/<subscription-id>/affinitygroups/<affinity-group-name>
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<UpdateAffinityGroup xmlns="http://schemas.microsoft.com/windowsazure">
<Label>base64-encoded-affinity-group-label</Label>
<Description>affinity-group-description</Description>
</UpdateAffinityGroup>
[method]
get_affinity_group_properties
[return]
AffinityGroup
[url]
GET https://management.core.windows.net/<subscription-id>/affinitygroups/<affinity-group-name>
[method]
list_locations
[return]
Locations
[url]
GET https://management.core.windows.net/<subscription-id>/locations
[method]
get_operation_status
[return]
OperationStatus
[url]
GET https://management.core.windows.net/<subscription-id>/operations/<request-id>
[method]
list_operating_systems
[return]
OperatingSystems
[url]
GET https://management.core.windows.net/<subscription-id>/operatingsystems
[method]
list_operating_system_families
[return]
OperatingSystemFamilies
[url]
GET https://management.core.windows.net/<subscription-id>/operatingsystemfamilies
[method]
list_subscription_operations
[return]
SubscriptionOperationCollection
[url]
GET https://management.core.windows.net/<subscription-id>/operations
[query]
StartTime=;required
EndTime=;required
ObjectIdFilter=
OperationResultFilter=
ContinuationToken=
[method]
get_subscription
[return]
Subscription
[url]
GET https://management.core.windows.net/<subscription-id>
[method]
create_profile
[url]
POST https://management.core.windows.net/<subscription-id>/services/WATM/profiles
[requestbody]
<Profile xmlns="http://schemas.microsoft.com/windowsazure">
<DomainName>[domain-name-for-the-profile]</DomainName>
<Name>[service-profile-name]</Name>
</Profile>
[method]
list_profiles
[return]
Profiles
[url]
GET https://management.core.windows.net/<subscription-id>/services/WATM/profiles
[method]
get_profile
[return]
Profile
[url]
GET https://management.core.windows.net/<subscription-id>/services/WATM/profiles/<profile-name>
[method]
delete_profile
[return]
Profile
[url]
DELETE https://management.core.windows.net/<subscription-id>/services/WATM/profiles/<profile-name>
[method]
list_definitions
[return]
Definitions
[url]
GET https://management.core.windows.net/<subscription-id>/services/WATM/profiles/<profile-name>/definitions
[method]
get_definition
[return]
Definition
[url]
GET https://management.core.windows.net/<subscription-id>/services/WATM/profiles/<profile-name>/definitions/<version>
[requestbody]
binary:blob
[method]
update_profile
[return]
[url]
PUT https://management.core.windows.net/<subscription-id>/services/WATM/profiles/<profile-name>
[requestbody]
class:profile
[end]
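Note that, unlike the other input files, hostedservices_input.txt is only listed as content in the project file above and is not passed to auto_codegen in codegenerator.py's __main__ block; the generator's request-signing branches also only cover the storage and service bus hosts, not the certificate-based management endpoint implied by the cert_file init parameter. If it were wired in, the invocation would look something like this hypothetical sketch (the output path is made up for illustration).

```Python
# Hypothetical invocation; not part of this change.
auto_codegen('hostedservices_input.txt',
             '../azure/servicemanagement/hostedservicemanager.py')
add_license('license.txt',
            '../azure/servicemanagement/hostedservicemanager.py')
```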


@ -0,0 +1,238 @@
[class]
QueueService
[x-ms-version]
2011-08-18
[class-comment]
This is the main class managing queue resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
[init]
account_name
account_key
[method]
get_queue_service_properties
[comment]
Gets the properties of a storage account's Queue Service, including Windows Azure
Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds. For example, the
following value sets a timeout of 30 seconds for the request: timeout=30
[return]
StorageServiceProperties
[url]
GET http://<account-name>.queue.core.windows.net/?restype=service&comp=properties
[query]
timeout=
[method]
list_queues
[comment]
Lists all of the queues in a given storage account.
[return]
QueueEnumResults
[url]
GET http://<account-name>.queue.core.windows.net/?comp=list
[query]
prefix=
marker=
maxresults=
include=
[method]
create_queue
[comment]
Creates a queue under the given account.
queue_name: name of the queue.
x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
with the queue as metadata.
fail_on_exist: specify whether to throw an exception when the queue exists.
[params]
fail_on_exist=False
[return]
None
[url]
PUT http://<account-name>.queue.core.windows.net/<queue-name>
[requestheader]
x-ms-meta-name-values=
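A minimal sketch of calling the generated QueueService.create_queue; the metadata dict and fail_on_exist follow the spec above, and the True/False return reflects the fail_on_exist handling emitted by output_method_body in codegenerator.py. Account name, key and queue name are placeholders.

```Python
from azure.storage.queueservice import QueueService

queue_service = QueueService('myaccount', 'mykey')   # placeholder credentials
created = queue_service.create_queue(
    'taskqueue',
    x_ms_meta_name_values={'department': 'tasks'},
    fail_on_exist=False)
print(created)   # True on success, False if the queue already existed
```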
[method]
delete_queue
[comment]
Permanently deletes the specified queue.
queue_name: name of the queue.
fail_not_exist: specify whether to throw an exception when the queue doesn't exist.
[params]
fail_not_exist=False
[return]
None
[url]
DELETE http://<account-name>.queue.core.windows.net/<queue-name>
[method]
get_queue_metadata
[comment]
Retrieves user-defined metadata and queue properties on the specified queue.
Metadata is associated with the queue as name-value pairs.
queue_name: name of the queue.
[return]
dict
prefix='x-ms-meta'
[url]
GET http://<account-name>.queue.core.windows.net/<queue-name>?comp=metadata
[method]
set_queue_metadata
[comment]
Sets user-defined metadata on the specified queue. Metadata is associated
with the queue as name-value pairs.
queue_name: name of the queue.
x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
with the queue as metadata.
[url]
PUT http://<account-name>.queue.core.windows.net/<queue-name>?comp=metadata
[requestheader]
x-ms-meta-name-values=
[method]
put_message
[comment]
Adds a new message to the back of the message queue. A visibility timeout can
also be specified to make the message invisible until the visibility timeout
expires. A message must be in a format that can be included in an XML request
with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
2011-08-18 and newer, or 8KB in size for previous versions.
queue_name: name of the queue.
visibilitytimeout: Optional. If specified, the request must be made using an
x-ms-version of 2011-08-18 or newer.
messagettl: Optional. Specifies the time-to-live interval for the message,
in seconds. The maximum time-to-live allowed is 7 days. If this parameter
is omitted, the default time-to-live is 7 days.
[return]
[url]
POST http://<account-name>.queue.core.windows.net/<queue-name>/messages
[query]
visibilitytimeout=
messagettl=
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<QueueMessage>
<MessageText>required</MessageText>
</QueueMessage>
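A sketch of enqueuing a message with the generated QueueService; the second argument maps to the required MessageText element above, and the names are placeholders.

```Python
from azure.storage.queueservice import QueueService

queue_service = QueueService('myaccount', 'mykey')   # placeholder credentials
# The second argument becomes the <MessageText> element of the request body.
queue_service.put_message('taskqueue', 'process the nightly report')
```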
[method]
get_messages
[comment]
Retrieves one or more messages from the front of the queue.
queue_name: name of the queue.
numofmessages: Optional. A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If fewer are
visible, the visible messages are returned. By default, a single message
is retrieved from the queue with this operation.
visibilitytimeout: Required. Specifies the new visibility timeout value, in
seconds, relative to server time. The new value must be larger than or
equal to 1 second, and cannot be larger than 7 days, or larger than 2
hours on REST protocol versions prior to version 2011-08-18. The visibility
timeout of a message can be set to a value later than the expiry time.
[return]
QueueMessagesList
[url]
GET http://<account-name>.queue.core.windows.net/<queue-name>/messages
[query]
numofmessages=
visibilitytimeout=
[method]
peek_messages
[comment]
Retrieves one or more messages from the front of the queue, but does not alter
the visibility of the message.
queue_name: name of the queue.
numofmessages: Optional. A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
[return]
QueueMessagesList
[url]
GET http://<account-name>.queue.core.windows.net/<queue-name>/messages?peekonly=true
[query]
numofmessages=
[method]
delete_message
[comment]
Deletes the specified message.
queue_name: name of the queue.
popreceipt: Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
[return]
[url]
DELETE http://<account-name>.queue.core.windows.net/<queue-name>/messages/<message-id>
[query]
popreceipt=;required
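Retrieving and then deleting a message ties the two specs above together. The attribute names on the returned objects are assumptions here, since they come from the QueueMessagesList/message classes in azure.storage rather than from this input file.

```Python
from azure.storage.queueservice import QueueService

queue_service = QueueService('myaccount', 'mykey')   # placeholder credentials
messages = queue_service.get_messages('taskqueue', numofmessages=16,
                                      visibilitytimeout=30)
# 'queue_messages', 'message_id' and 'pop_receipt' are assumed attribute names
# on the parsed result; the pop receipt is required by delete_message above.
for message in messages.queue_messages:
    queue_service.delete_message('taskqueue', message.message_id,
                                 message.pop_receipt)
```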
[method]
clear_messages
[comment]
Deletes all messages from the specified queue.
queue_name: name of the queue.
[return]
[url]
DELETE http://<account-name>.queue.core.windows.net/<queue-name>/messages
[method]
update_message
[comment]
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
queue_name: name of the queue.
popreceipt: Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
visibilitytimeout: Required. Specifies the new visibility timeout value, in
seconds, relative to server time. The new value must be larger than or
equal to 0, and cannot be larger than 7 days. The visibility timeout
of a message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
[return]
dict
filter=['x-ms-popreceipt', 'x-ms-time-next-visible']
[url]
PUT http://<account-name>.queue.core.windows.net/<queue-name>/messages/<message-id>
[query]
popreceipt=;required
visibilitytimeout=;required
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<QueueMessage>
<MessageText>required</MessageText>
</QueueMessage>
[method]
set_queue_service_properties
[comment]
Sets the properties of a storage account's Queue service, including Windows Azure
Storage Analytics.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
[return]
[url]
PUT http://<account-name>.queue.core.windows.net/?restype=service&comp=properties
[query]
timeout=
[requestbody]
class:storage_service_properties;required
[end]


@ -0,0 +1,480 @@
[class]
ServiceBusService
[x-ms-version]
2011-06-01
[init]
service_namespace
account_key
issuer
[method]
create_queue
[comment]
Creates a new queue. Once created, this queue's resource manifest is immutable.
queue: queue object to create.
queue_name: the name of the queue.
fail_on_exist: specify whether to throw an exception when the queue exists.
[params]
fail_on_exist=False
[return]
None
[url]
PUT https://<service-namespace>.servicebus.windows.net/<queue-name>
[requestbody]
feed:queue
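A sketch of creating a Service Bus queue with the generated ServiceBusService; the constructor arguments mirror the [init] list above, and the namespace, key, issuer and queue name are placeholders.

```Python
from azure.servicebus.servicebusservice import ServiceBusService

sbs = ServiceBusService('mynamespace', 'mykey', 'owner')   # placeholder credentials
# queue defaults to None, so the queue is created with default settings;
# fail_on_exist=False reports an existing queue through the return value.
sbs.create_queue('taskqueue', queue=None, fail_on_exist=False)
```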
[method]
delete_queue
[comment]
Deletes an existing queue. This operation will also remove all associated state
including messages in the queue.
fail_not_exist: specify whether to throw an exception if the queue doesn't exist.
[params]
fail_not_exist=False
[return]
None
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<queue-name>
[method]
get_queue
[comment]
Retrieves an existing queue.
queue_name: name of the queue.
[return]
Feed('queue')
[url]
GET https://<service-namespace>.servicebus.windows.net/<queue-name>
[requestheader]
[method]
list_queues
[comment]
Enumerates the queues in the service namespace.
[return]
(Feed('queue'),)
[url]
GET https://<service-namespace>.servicebus.windows.net/$Resources/Queues
[requestheader]
[method]
create_topic
[comment]
Creates a new topic. Once created, this topic resource manifest is immutable.
topic_name: name of the topic.
topic: the Topic object to create.
fail_on_exist: specify whether to throw an exception when the topic exists.
[params]
fail_on_exist=False
[return]
None
[url]
PUT https://<service-namespace>.servicebus.windows.net/<topic_name>
[requestbody]
feed:topic
[method]
delete_topic
[comment]
Deletes an existing topic. This operation will also remove all associated state
including associated subscriptions.
topic_name: name of the topic.
fail_not_exist: specify whether to throw an exception when the topic doesn't exist.
[params]
fail_not_exist=False
[return]
None
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<topic_name>
[method]
get_topic
[comment]
Retrieves the description for the specified topic.
topic_name: name of the topic.
[return]
Feed('topic')
[url]
GET https://<service-namespace>.servicebus.windows.net/<topic_name>
[requestheader]
[method]
list_topics
[comment]
Retrieves the topics in the service namespace.
[return]
(Feed('topic'),)
[url]
GET https://<service-namespace>.servicebus.windows.net/$Resources/Topics
[requestheader]
[method]
create_rule
[comment]
Creates a new rule. Once created, this rule's resource manifest is immutable.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: name of the rule.
fail_on_exist: specify whether to throw an exception when the rule exists.
[params]
fail_on_exist=False
[return]
None
[url]
PUT https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/rules/<rule-name>
[requestbody]
feed:rule
[method]
delete_rule
[comment]
Deletes an existing rule.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: the name of the rule. DEFAULT_RULE_NAME=$Default. Use DEFAULT_RULE_NAME
to delete default rule for the subscription.
fail_not_exist: specify whether to throw an exception when the rule doesn't exist.
[params]
fail_not_exist=False
[return]
None
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/rules/<rule-name>
[method]
get_rule
[comment]
Retrieves the description for the specified rule.
topic_name: the name of the topic
subscription_name: the name of the subscription
rule_name: name of the rule
[return]
Feed('rule')
[url]
GET https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/rules/<rule-name>
[method]
list_rules
[comment]
Retrieves the rules that exist under the specified subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
[return]
(Feed('rule'),)
[url]
GET https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/rules/
[method]
create_subscription
[comment]
Creates a new subscription. Once created, this subscription resource manifest is
immutable.
topic_name: the name of the topic
subscription_name: the name of the subscription
fail_on_exist: specify whether to throw an exception when the subscription exists.
[params]
fail_on_exist=False
[return]
None
[url]
PUT https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>
[requestbody]
feed:subscription
[method]
delete_subscription
[comment]
Deletes an existing subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
fail_not_exist: specify whether to throw an exception when the subscription doesn't exist.
[params]
fail_not_exist=False
[return]
None
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>
[method]
get_subscription
[comment]
Gets an existing subscription.
topic_name: the name of the topic
subscription_name: the name of the subscription
[return]
Feed('subscription')
[url]
GET https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>
[method]
list_subscriptions
[comment]
Retrieves the subscriptions in the specified topic.
topic_name: the name of the topic
[return]
(Feed('subscription'),)
[url]
GET https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/
[method]
send_topic_message
[comment]
Enqueues a message into the specified topic. The number of messages that may be
present in the topic is limited by the message size and the topic's MaxTopicSizeInBytes.
If this message causes the topic to exceed its quota, a quota exceeded error is
returned and the message will be rejected.
topic_name: name of the topic.
message: the Message object containing message body and properties.
[return]
[url]
POST https://<service-namespace>.servicebus.windows.net/<topic-name>/messages
[requestbody]
binary:message
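Sending to a topic through the generated method might look like the sketch below; the Message class is assumed to be the one exposed by the azure.servicebus package, which also supplies the _create_message helper the generator uses for responses. Names are placeholders.

```Python
from azure.servicebus import Message
from azure.servicebus.servicebusservice import ServiceBusService

sbs = ServiceBusService('mynamespace', 'mykey', 'owner')   # placeholder credentials
msg = Message('nightly build finished')   # Message wraps the body (and properties)
sbs.send_topic_message('builds', msg)
```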
[method]
peek_lock_subscription_message
[comment]
This operation is used to atomically retrieve and lock a message for processing.
The message is guaranteed not to be delivered to other receivers (on the same
subscription only) during the lock duration period specified in the subscription
description. Once the lock expires, the message will be available to other
receivers. In order to complete processing of the message, the receiver should
issue a delete command with the lock ID received from this operation. To abandon
processing of the message and unlock it for other receivers, an Unlock Message
command should be issued, or the lock duration period can expire.
topic_name: the name of the topic
subscription_name: the name of the subscription
[return]
Message
[url]
POST https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/messages/head
[query]
timeout=60
[method]
unlock_subscription_message
[comment]
Unlocks a message for processing by other receivers on a given subscription.
This operation deletes the lock object, causing the message to be unlocked.
A message must have first been locked by a receiver before this operation
is called.
topic_name: the name of the topic
subscription_name: the name of the subscription
sequence_name: The sequence number of the message to be unlocked as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
[return]
[url]
PUT https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/messages/<sequence-number>/<lock-token>
[method]
read_delete_subscription_message
[comment]
Read and delete a message from a subscription as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient for an
application; that is, using this operation it is possible for messages to
be lost if processing fails.
topic_name: the name of the topic
subscription_name: the name of the subscription
[return]
Message
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/messages/head
[query]
timeout=60
[method]
delete_subscription_message
[comment]
Completes processing of a locked message and deletes it from the subscription.
This operation should only be called after the previously locked message has
been processed successfully, in order to maintain At-Least-Once delivery
assurances.
topic_name: the name of the topic
subscription_name: the name of the subscription
sequence_number: The sequence number of the message to be deleted as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
[return]
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<topic-name>/subscriptions/<subscription-name>/messages/<sequence-number>/<lock-token>
[method]
send_queue_message
[comment]
Sends a message into the specified queue. The limit on the number of messages
that may be present in the queue is governed by the message size and the queue's
maximum size (MaxSizeInMegabytes). If this message causes the queue to exceed its
quota, a quota-exceeded error is returned and the message will be rejected.
queue_name: name of the queue
message: the Message object containing message body and properties.
[return]
[url]
POST https://<service-namespace>.servicebus.windows.net/<queue-name>/messages
[requestbody]
binary:message
[method]
peek_lock_queue_message
[comment]
Atomically retrieves and locks a message from a queue for processing. The
message is guaranteed not to be delivered to other receivers during the lock
duration period specified in the queue description. Once the lock expires, the
message will be available to other receivers. In order to complete processing
of the message, the receiver should issue a delete command with the lock ID
received from this operation. To abandon processing of the message and unlock
it for other receivers, an Unlock Message command should be issued, or the
lock duration period can be allowed to expire.
queue_name: name of the queue
[return]
Message
[url]
POST https://<service-namespace>.servicebus.windows.net/<queue-name>/messages/head
[query]
timeout=60
[method]
unlock_queue_message
[comment]
Unlocks a message for processing by other receivers on a given queue.
This operation deletes the lock object, causing the message to be unlocked.
A message must first have been locked by a receiver before this operation is
called.
queue_name: name of the queue
sequence_number: The sequence number of the message to be unlocked as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
[return]
[url]
PUT https://<service-namespace>.servicebus.windows.net/<queue-name>/messages/<sequence-number>/<lock-token>
[method]
read_delete_queue_message
[comment]
Reads and deletes a message from a queue as an atomic operation. This operation
should be used when a best-effort guarantee is sufficient for an application;
that is, using this operation it is possible for messages to be lost if
processing fails.
queue_name: name of the queue
[return]
Message
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<queue-name>/messages/head
[query]
timeout=60
[method]
delete_queue_message
[comment]
Completes processing of a locked message and deletes it from the queue. This
operation should only be called after the previously locked message has been
processed successfully, in order to maintain At-Least-Once delivery assurances.
queue_name: name of the queue
sequence_number: The sequence number of the message to be deleted as returned
in BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token: The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
[return]
[url]
DELETE https://<service-namespace>.servicebus.windows.net/<queue-name>/messages/<sequence-number>/<lock-token>
[methods_code]
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
def receive_subscription_message(self, topic_name, subscription_name, peek_lock=True, timeout=60):
if peek_lock:
return self.peek_lock_subscription_message(topic_name, subscription_name, timeout)
else:
return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01'):
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
self.issuer = issuer
#get service namespace, account key and issuer. If they are set when constructing, then use them.
#else find them from environment variables.
if not service_namespace:
if os.environ.has_key(AZURE_SERVICEBUS_NAMESPACE):
self.service_namespace = os.environ[AZURE_SERVICEBUS_NAMESPACE]
if not account_key:
if os.environ.has_key(AZURE_SERVICEBUS_ACCESS_KEY):
self.account_key = os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
if not issuer:
if os.environ.has_key(AZURE_SERVICEBUS_ISSUER):
self.issuer = os.environ[AZURE_SERVICEBUS_ISSUER]
if not self.service_namespace or not self.account_key or not self.issuer:
raise WindowsAzureError('You need to provide servicebus namespace, access key and Issuer')
self.x_ms_version = x_ms_version
self._httpclient = _HTTPClient(service_instance=self, service_namespace=self.service_namespace, account_key=self.account_key, issuer=self.issuer, x_ms_version=self.x_ms_version)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any post-processing
on the response.'''
res = ServiceBusService(self.service_namespace, self.account_key,
self.issuer, self.x_ms_version)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as e:
return _service_bus_error_handler(e)
if not resp:
return None
return resp
[end]
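
The receive_* helpers above simply dispatch between the peek-lock and read-delete operations, and with_filter layers pre- and post-processing around each request. A hedged sketch, reusing the request/response attributes exercised by the tests later in this change (log_filter is illustrative only, and sbs is assumed to be a ServiceBusService instance as above):

```Python
# Hedged sketch: wrap the service with a logging filter, then do a
# destructive (read-delete) receive from a queue.
def log_filter(request, next):
    print('>> %s %s' % (request.method, request.path))  # pre-processing
    response = next(request)                            # hand off to the pipeline
    print('<< %d' % response.status)                    # post-processing
    return response

sbs_logged = sbs.with_filter(log_filter)

# peek_lock=False maps to read_delete_queue_message (best-effort delivery);
# the default peek_lock=True maps to peek_lock_queue_message.
msg = sbs_logged.receive_queue_message('myqueue', peek_lock=False)
print(msg.body)
```
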


@ -0,0 +1,50 @@
[class]
SqlAzureManager
[x-ms-version]
2011-06-01
[init]
account_name
account_key
[method]
create_server
[return]
ServerName
[url]
POST https://management.database.windows.net:8443/<subscription-id>/servers
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<Server xmlns="http://schemas.microsoft.com/sqlazure/2010/12/">
<AdministratorLogin>MyAdminAccount</AdministratorLogin>
<AdministratorLoginPassword>MyAdminPassword</AdministratorLoginPassword>
<Location>North Central US | South Central US | North Europe | West Europe | East Asia | Southeast Asia</Location>
</Server>
[method]
enumerate_servers
[return]
Servers
[url]
GET https://management.database.windows.net:8443/<subscription-id>/servers
[method]
drop_server
[url]
DELETE https://management.database.windows.net:8443/<subscription-id>/servers/<servername>
[method]
set_admin_password
[url]
POST https://management.database.windows.net:8443/<subscription-id>/servers/<servername>?op=ResetPassword
[requestbody]
<?xml version="1.0" encoding="utf-8"?>
<AdministratorLoginPassword xmlns="http://schemas.microsoft.com/sqlazure/2010/12/">TheNewPassword</AdministratorLoginPassword>
[end]
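
The Python-level signatures for SqlAzureManager are not spelled out in this spec, so the following is only a plausible sketch: the constructor arguments come from the [init] section, while the create_server and set_admin_password parameter names are assumptions inferred from the request bodies above.

```Python
# Hypothetical usage of the SqlAzureManager described above; the create_server
# and set_admin_password parameter names and ordering are assumptions.
mgr = SqlAzureManager('myaccount', 'mykey')  # placeholder credentials
server_name = mgr.create_server('MyAdminAccount', 'MyAdminPassword', 'West Europe')
for server in mgr.enumerate_servers():
    print(server)
mgr.set_admin_password(server_name, 'TheNewPassword')
mgr.drop_server(server_name)
```
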


@ -0,0 +1,221 @@
[class]
TableService
[x-ms-version]
2011-08-18
[class-comment]
This is the main class managing Table resources.
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
[init]
account_name
account_key
[method]
get_table_service_properties
[comment]
Gets the properties of a storage account's Table service, including Windows Azure
Storage Analytics.
[return]
StorageServiceProperties
[url]
GET http://<account-name>.table.core.windows.net/?restype=service&comp=properties
[method]
set_table_service_properties
[comment]
Sets the properties of a storage account's Table Service, including Windows Azure Storage Analytics.
storage_service_properties: a StorageServiceProperties object.
[return]
dict
[url]
PUT http://<account-name>.table.core.windows.net/?restype=service&comp=properties
[requestbody]
class:storage_service_properties;required
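
In practice the two service-properties calls are used together: read the current StorageServiceProperties, change a setting, and write the object back. A minimal sketch, assuming placeholder credentials and that StorageServiceProperties exposes metrics.enabled the way the blob-service tests in this change do:

```Python
# Hedged sketch: read the analytics settings, disable metrics, write them back.
from azure.storage import TableService

ts = TableService(account_name, account_key)  # placeholder credentials
props = ts.get_table_service_properties()
props.metrics.enabled = False
ts.set_table_service_properties(props)
```
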
[method]
query_tables
[comment]
Returns a list of tables under the specified account.
table_name: optional, the specific table to query
top: the maximum number of tables to return
[return]
(Feed('table'), )
[url]
GET http://<account-name>.table.core.windows.net/Tables<?table_name:('[table_name]')>
[query]
$top=
[method]
create_table
[comment]
Creates a new table in the storage account.
table: name of the table to create.
fail_on_exist: specify whether to throw an exception when the table already exists.
[params]
fail_on_exist=False
[return]
None
[url]
POST http://<account-name>.table.core.windows.net/Tables
[requestbody]
feed:table;required:feed
[method]
delete_table
[comment]
Deletes the specified table from the storage account.
table_name: name of the table to delete.
fail_not_exist: specify whether to throw an exception when the table doesn't exist.
[params]
fail_not_exist=False
[return]
[url]
DELETE http://<account-name>.table.core.windows.net/Tables(\'<table-name>\')
[method]
get_entity
[comment]
Gets an entity from a table; includes the $select option.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
select: the property names to select.
[return]
Feed('entity')
[url]
GET http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')?$select=<select=''>
[method]
query_entities
[comment]
Gets entities in a table; includes the $filter and $select options.
table_name: the table to query
filter: a filter as described at http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
select: the property names to select from the entities
top: the maximum number of entities to return
[return]
(Feed('entity'), )
[url]
GET http://<account-name>.table.core.windows.net/<table-name>()
[query]
$filter=
$select=
$top=
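
A short sketch of an OData-style query, assuming ts is a TableService instance, 'mytable' already exists, that filter/select/top are accepted as keyword arguments with those names (as listed in the comment above), and that returned entities expose their properties as attributes:

```Python
# Hedged sketch: filter on PartitionKey, project two properties, cap at 10 rows.
entities = ts.query_entities(
    'mytable',
    filter="PartitionKey eq 'p1'",
    select='Name,Due',
    top=10)
for entity in entities:
    print(entity.Name)
```
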
[method]
insert_entity
[comment]
Inserts a new entity into a table.
entity: Required. The entity object to insert. Can be a dict or an Entity object.
[return]
[url]
POST http://<account-name>.table.core.windows.net/<table-name>
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
[requestbody]
feed:entity;required:feed
[method]
update_entity
[comment]
Updates an existing entity in a table. The Update Entity operation replaces the entire
entity and can be used to remove properties.
entity: Required. The entity object to update. Can be a dict or an Entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
[url]
PUT http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
If-Match=*
[requestbody]
feed:entity;required:feed
[method]
merge_entity
[comment]
Updates an existing entity by updating the entity's properties. This operation does
not replace the existing entity as the Update Entity operation does.
entity: Required. The entity object to merge. Can be a dict or an Entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
[url]
MERGE http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
If-Match=*
[requestbody]
feed:entity;required:feed
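
The practical difference between update_entity and merge_entity is whether properties omitted from the request survive: update replaces the whole entity, merge only overwrites what is supplied. A minimal sketch, assuming the argument order (table_name, partition_key, row_key, entity):

```Python
# Hedged sketch: after the update, 'Due' is gone (full replace); the merge
# then adds it back without touching 'Name'.
ts.insert_entity('mytable', {'PartitionKey': 'p1', 'RowKey': '1',
                             'Name': 'task', 'Due': 'Friday'})
ts.update_entity('mytable', 'p1', '1',
                 {'PartitionKey': 'p1', 'RowKey': '1', 'Name': 'task-renamed'})
ts.merge_entity('mytable', 'p1', '1',
                {'PartitionKey': 'p1', 'RowKey': '1', 'Due': 'Monday'})
```
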
[method]
delete_entity
[comment]
Deletes an existing entity in a table.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
if_match: Required. Specifies the condition for which the delete should be performed.
To force an unconditional delete, set If-Match to the wildcard character (*).
[return]
[url]
DELETE http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
If-Match=*;required
[method]
insert_or_replace_entity
[comment]
Replaces an existing entity or inserts a new entity if it does not exist in the table.
Because this operation can insert or update an entity, it is also known as an "upsert"
operation.
entity: Required. The entity object to insert or replace. Can be a dict or an Entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
[url]
PUT http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
[requestbody]
feed:entity;required:feed
[method]
insert_or_merge_entity
[comment]
Merges an existing entity or inserts a new entity if it does not exist in the table.
Because this operation can insert or update an entity, it is also known as an "upsert"
operation.
entity: Required. The entity object to insert or merge. Can be a dict or an Entity object.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
[url]
MERGE http://<account-name>.table.core.windows.net/<table-name>(PartitionKey=\'<partition-key>\',RowKey=\'<row-key>\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
If-Match=*
[requestbody]
feed:entity;required:feed
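
Both upsert variants take the same arguments as their conditional counterparts and succeed whether or not the entity already exists; choosing between them follows the same replace-versus-merge distinction as above. A brief sketch under the same signature assumptions:

```Python
# Hedged sketch: unconditional replace vs. unconditional merge ("upsert").
ts.insert_or_replace_entity('mytable', 'p1', '2',
                            {'PartitionKey': 'p1', 'RowKey': '2', 'Name': 'fresh'})
ts.insert_or_merge_entity('mytable', 'p1', '2',
                          {'PartitionKey': 'p1', 'RowKey': '2', 'Due': 'Tuesday'})
```
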
[methods_code]
def _perform_request_worker(self, request):
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
return self._httpclient.perform_request(request)
[end]

src/install.bat Normal file

@ -0,0 +1,16 @@
@echo OFF
REM----------------------------------------------------------------------------
REM Copyright (c) Microsoft Corporation.
REM
REM This source code is subject to terms and conditions of the Apache License,
REM Version 2.0. A copy of the license can be found in the License.html file at
REM the root of this distribution. If you cannot locate the Apache License,
REM Version 2.0, please send an email to vspython@microsoft.com. By using this
REM source code in any fashion, you are agreeing to be bound by the terms of the
REM Apache License, Version 2.0.
REM
REM You must not remove this notice, or any other, from this software.
REM----------------------------------------------------------------------------
cls
%SystemDrive%\Python27\python.exe setup.py install

src/installfrompip.bat Normal file

@ -0,0 +1,16 @@
@echo OFF
REM----------------------------------------------------------------------------
REM Copyright (c) Microsoft Corporation.
REM
REM This source code is subject to terms and conditions of the Apache License,
REM Version 2.0. A copy of the license can be found in the License.html file at
REM the root of this distribution. If you cannot locate the Apache License,
REM Version 2.0, please send an email to vspython@microsoft.com. By using this
REM source code in any fashion, you are agreeing to be bound by the terms of the
REM Apache License, Version 2.0.
REM
REM You must not remove this notice, or any other, from this software.
REM----------------------------------------------------------------------------
cls
%SystemDrive%\Python27\Scripts\pip.exe install azure --upgrade

src/setup.py Normal file

@ -0,0 +1,26 @@
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License,
# Version 2.0. A copy of the license can be found in the License.html file at
# the root of this distribution. If you cannot locate the Apache License,
# Version 2.0, please send an email to vspython@microsoft.com. By using this
# source code in any fashion, you are agreeing to be bound by the terms of the
# Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#------------------------------------------------------------------------------
from distutils.core import setup
setup(name='azure',
version='0.2.3',
description='Windows Azure client APIs',
url='https://github.com/WindowsAzure/azure-sdk-for-python',
packages=['azure',
'azure.http',
'azure.servicebus',
'azure.storage']
)

src/upload.bat Normal file

@ -0,0 +1,18 @@
@echo OFF
REM----------------------------------------------------------------------------
REM Copyright (c) Microsoft Corporation.
REM
REM This source code is subject to terms and conditions of the Apache License,
REM Version 2.0. A copy of the license can be found in the License.html file at
REM the root of this distribution. If you cannot locate the Apache License,
REM Version 2.0, please send an email to vspython@microsoft.com. By using this
REM source code in any fashion, you are agreeing to be bound by the terms of the
REM Apache License, Version 2.0.
REM
REM You must not remove this notice, or any other, from this software.
REM----------------------------------------------------------------------------
cls
REM %SystemDrive%\Python27\python.exe setup.py register
%SystemDrive%\Python27\python.exe setup.py sdist upload

test/azuretest.pyproj Normal file

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{c0742a2d-4862-40e4-8a28-036eecdbc614}</ProjectGuid>
<ProjectHome>
</ProjectHome>
<StartupFile>azuretest\test_tableservice.py</StartupFile>
<WorkingDirectory>.</WorkingDirectory>
<OutputPath>.</OutputPath>
<Name>azuretest</Name>
<RootNamespace>windowsazuretest</RootNamespace>
<LaunchProvider>Standard Python launcher</LaunchProvider>
<CommandLineArguments>-v </CommandLineArguments>
<InterpreterPath />
<InterpreterArguments />
<ClusterPublishBeforeRun>True</ClusterPublishBeforeRun>
<ClusterRunEnvironment>localhost/1/Core/</ClusterRunEnvironment>
<ClusterTargetPlatform>X86</ClusterTargetPlatform>
<IsWindowsApplication>False</IsWindowsApplication>
<InterpreterId>2af0f10d-7135-4994-9156-5d01c9c11b7e</InterpreterId>
<InterpreterVersion>2.7</InterpreterVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
<DebugSymbols>true</DebugSymbols>
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
</PropertyGroup>
<ItemGroup>
<Folder Include="azuretest" />
</ItemGroup>
<ItemGroup>
<Compile Include="azuretest\test_blobservice.py" />
<Compile Include="azuretest\test_queueservice.py" />
<Compile Include="azuretest\test_tableservice.py" />
<Compile Include="azuretest\test_servicebusservice.py" />
<Compile Include="azuretest\util.py" />
<Compile Include="azuretest\__init__.py" />
</ItemGroup>
<ItemGroup>
<Content Include="run.bat" />
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
</Project>


@ -0,0 +1,14 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------


@ -0,0 +1,786 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.storage.blobservice import *
from azure.storage import Metrics, BlockList
from azure import WindowsAzureError
from azuretest.util import *
from azure.http import HTTPRequest, HTTPResponse
import unittest
import time
#------------------------------------------------------------------------------
class BlobServiceTest(unittest.TestCase):
def setUp(self):
self.bc = BlobService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
# TODO: it may be overkill to use the machine name from
# getUniqueTestRunID, current time may be unique enough
__uid = getUniqueTestRunID()
container_base_name = u'mytestcontainer%s' % (__uid)
self.container_name = getUniqueNameBasedOnCurrentTime(container_base_name)
def tearDown(self):
self.cleanup()
return super(BlobServiceTest, self).tearDown()
def cleanup(self):
try:
self.bc.delete_container(self.container_name)
except: pass
#--Helpers-----------------------------------------------------------------
# TODO: move this function out of here so other tests can use them
# TODO: find out how to import/use safe_repr instead of repr
def assertNamedItemInContainer(self, container, item_name, msg=None):
for item in container:
if item.name == item_name:
return
standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
self.fail(self._formatMessage(msg, standardMsg))
# TODO: move this function out of here so other tests can use them
# TODO: find out how to import/use safe_repr instead of repr
def assertNamedItemNotInContainer(self, container, item_name, msg=None):
for item in container:
if item.name == item_name:
standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def _create_container(self, container_name):
self.bc.create_container(container_name, None, None, True)
def _create_container_and_block_blob(self, container_name, blob_name, blob_data):
self._create_container(container_name)
resp = self.bc.put_blob(container_name, blob_name, blob_data, 'BlockBlob')
self.assertIsNone(resp)
def _create_container_and_page_blob(self, container_name, blob_name, content_length):
self._create_container(container_name)
resp = self.bc.put_blob(container_name, blob_name, '', 'PageBlob', x_ms_blob_content_length=str(content_length))
self.assertIsNone(resp)
#--Test cases for containers -----------------------------------------
def test_create_container_no_options(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name)
# Assert
self.assertTrue(created)
def test_create_container_no_options_fail_on_exist(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name, None, None, True)
# Assert
self.assertTrue(created)
def test_create_container_with_already_existing_container_fail_on_exist(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name)
with self.assertRaises(WindowsAzureError):
self.bc.create_container(self.container_name, None, None, True)
# Assert
self.assertTrue(created)
def test_create_container_with_public_access_container(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name, None, 'container')
# Assert
self.assertTrue(created)
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
def test_create_container_with_public_access_blob(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name, None, 'blob')
# Assert
self.assertTrue(created)
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
def test_create_container_with_metadata(self):
# Arrange
# Act
created = self.bc.create_container(self.container_name, {'hello':'world', 'foo':'42'})
# Assert
self.assertTrue(created)
md = self.bc.get_container_metadata(self.container_name)
self.assertIsNotNone(md)
self.assertEquals(md['x-ms-meta-hello'], 'world')
self.assertEquals(md['x-ms-meta-foo'], '42')
def test_list_containers_no_options(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
containers = self.bc.list_containers()
for container in containers:
name = container.name
# Assert
self.assertIsNotNone(containers)
self.assertNamedItemInContainer(containers, self.container_name)
def test_set_container_metadata(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
resp = self.bc.set_container_metadata(self.container_name, {'hello':'world', 'bar':'43'})
# Assert
self.assertIsNone(resp)
md = self.bc.get_container_metadata(self.container_name)
self.assertIsNotNone(md)
self.assertEquals(md['x-ms-meta-hello'], 'world')
self.assertEquals(md['x-ms-meta-bar'], '43')
def test_set_container_metadata_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.set_container_metadata(self.container_name, {'hello':'world', 'bar':'43'})
# Assert
def test_get_container_metadata(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
md = self.bc.get_container_metadata(self.container_name)
# Assert
self.assertIsNotNone(md)
def test_get_container_metadata_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_container_metadata(self.container_name)
# Assert
def test_get_container_properties(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
props = self.bc.get_container_properties(self.container_name)
# Assert
self.assertIsNotNone(props)
def test_get_container_properties_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_container_properties(self.container_name)
# Assert
def test_get_container_acl(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
acl = self.bc.get_container_acl(self.container_name)
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
def test_get_container_acl_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_container_acl(self.container_name)
# Assert
def test_set_container_acl(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
resp = self.bc.set_container_acl(self.container_name)
# Assert
self.assertIsNone(resp)
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
def test_set_container_acl_with_public_access_container(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
resp = self.bc.set_container_acl(self.container_name, None, 'container')
# Assert
self.assertIsNone(resp)
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
def test_set_container_acl_with_public_access_blob(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
resp = self.bc.set_container_acl(self.container_name, None, 'blob')
# Assert
self.assertIsNone(resp)
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
def test_set_container_acl_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.set_container_acl(self.container_name, None, 'container')
# Assert
def test_delete_container_with_existing_container(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
deleted = self.bc.delete_container(self.container_name)
# Assert
self.assertTrue(deleted)
containers = self.bc.list_containers()
self.assertNamedItemNotInContainer(containers, self.container_name)
def test_delete_container_with_existing_container_fail_not_exist(self):
# Arrange
self.bc.create_container(self.container_name)
# Act
deleted = self.bc.delete_container(self.container_name, True)
# Assert
self.assertTrue(deleted)
containers = self.bc.list_containers()
self.assertNamedItemNotInContainer(containers, self.container_name)
def test_delete_container_with_non_existing_container(self):
# Arrange
# Act
deleted = self.bc.delete_container(self.container_name)
# Assert
self.assertFalse(deleted)
def test_delete_container_with_non_existing_container_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.delete_container(self.container_name, True)
# Assert
#--Test cases for blob service ---------------------------------------
def test_set_blob_service_properties(self):
# Arrange
# Act
props = StorageServiceProperties()
props.metrics.enabled = False
resp = self.bc.set_blob_service_properties(props)
# Assert
self.assertIsNone(resp)
received_props = self.bc.get_blob_service_properties()
self.assertFalse(received_props.metrics.enabled)
def test_set_blob_service_properties_with_timeout(self):
# Arrange
# Act
props = StorageServiceProperties()
props.logging.write = True
resp = self.bc.set_blob_service_properties(props, 5)
# Assert
self.assertIsNone(resp)
received_props = self.bc.get_blob_service_properties()
self.assertTrue(received_props.logging.write)
def test_get_blob_service_properties(self):
# Arrange
# Act
props = self.bc.get_blob_service_properties()
# Assert
self.assertIsNotNone(props)
self.assertIsInstance(props.logging, Logging)
self.assertIsInstance(props.metrics, Metrics)
def test_get_blob_service_properties_with_timeout(self):
# Arrange
# Act
props = self.bc.get_blob_service_properties(5)
# Assert
self.assertIsNotNone(props)
self.assertIsInstance(props.logging, Logging)
self.assertIsInstance(props.metrics, Metrics)
#--Test cases for blobs ----------------------------------------------
def test_list_blobs(self):
# Arrange
self._create_container(self.container_name)
data = 'hello world'
resp = self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob')
resp = self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob')
# Act
blobs = self.bc.list_blobs(self.container_name)
for blob in blobs:
name = blob.name
# Assert
self.assertIsNotNone(blobs)
self.assertNamedItemInContainer(blobs, 'blob1')
self.assertNamedItemInContainer(blobs, 'blob2')
def test_put_blob_block_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
data = 'hello world'
resp = self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob')
# Assert
self.assertIsNone(resp)
def test_put_blob_page_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
resp = self.bc.put_blob(self.container_name, 'blob1', '', 'PageBlob', x_ms_blob_content_length='1024')
# Assert
self.assertIsNone(resp)
def test_get_blob_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
blob = self.bc.get_blob(self.container_name, 'blob1')
# Assert
self.assertEqual(type(blob), str)
self.assertEquals(blob, 'hello world')
def test_get_blob_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_blob(self.container_name, 'blob1')
# Assert
def test_get_blob_with_non_existing_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_blob(self.container_name, 'blob1')
# Assert
def test_set_blob_properties_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
resp = self.bc.set_blob_properties(self.container_name, 'blob1', x_ms_blob_content_language='spanish')
# Assert
self.assertIsNone(resp)
props = self.bc.get_blob_properties(self.container_name, 'blob1')
self.assertEquals(props['Content-Language'], 'spanish')
def test_set_blob_properties_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.set_blob_properties(self.container_name, 'blob1', x_ms_blob_content_language='spanish')
# Assert
def test_set_blob_properties_with_non_existing_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
with self.assertRaises(WindowsAzureError):
self.bc.set_blob_properties(self.container_name, 'blob1', x_ms_blob_content_language='spanish')
# Assert
def test_get_blob_properties_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
props = self.bc.get_blob_properties(self.container_name, 'blob1')
# Assert
self.assertIsNotNone(props)
self.assertEquals(props['x-ms-blob-type'], 'BlockBlob')
self.assertEquals(props['x-ms-lease-status'], 'unlocked')
def test_get_blob_properties_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_blob_properties(self.container_name, 'blob1')
# Assert
def test_get_blob_properties_with_non_existing_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
with self.assertRaises(WindowsAzureError):
self.bc.get_blob_properties(self.container_name, 'blob1')
# Assert
def test_get_blob_metadata_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
md = self.bc.get_blob_metadata(self.container_name, 'blob1')
# Assert
self.assertIsNotNone(md)
def test_set_blob_metadata_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
resp = self.bc.set_blob_metadata(self.container_name, 'blob1', {'hello':'world', 'foo':'42'})
# Assert
self.assertIsNone(resp)
md = self.bc.get_blob_metadata(self.container_name, 'blob1')
self.assertEquals(md['x-ms-meta-hello'], 'world')
self.assertEquals(md['x-ms-meta-foo'], '42')
def test_delete_blob_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
resp = self.bc.delete_blob(self.container_name, 'blob1')
# Assert
self.assertIsNone(resp)
def test_delete_blob_with_non_existing_blob(self):
# Arrange
self._create_container(self.container_name)
# Act
with self.assertRaises(WindowsAzureError):
self.bc.delete_blob(self.container_name, 'blob1')
# Assert
def test_copy_blob_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
sourceblob = '/%s/%s/%s' % (credentials.getStorageServicesName(),
self.container_name,
'blob1')
resp = self.bc.copy_blob(self.container_name, 'blob1copy', sourceblob)
# Assert
self.assertIsNone(resp)
copy = self.bc.get_blob(self.container_name, 'blob1copy')
self.assertEquals(copy, 'hello world')
def test_snapshot_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
resp = self.bc.snapshot_blob(self.container_name, 'blob1')
# Assert
self.assertIsNone(resp)
def test_lease_blob_acquire_and_release(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
# Act
resp1 = self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
resp2 = self.bc.lease_blob(self.container_name, 'blob1', 'release', resp1['x-ms-lease-id'])
# Assert
self.assertIsNotNone(resp1)
self.assertIsNotNone(resp2)
def test_lease_blob_acquire_twice_fails(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
resp1 = self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
# Act
with self.assertRaises(WindowsAzureError):
self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
resp2 = self.bc.lease_blob(self.container_name, 'blob1', 'release', resp1['x-ms-lease-id'])
# Assert
self.assertIsNotNone(resp1)
self.assertIsNotNone(resp2)
def test_put_block(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', '')
# Act
for i in xrange(5):
resp = self.bc.put_block(self.container_name,
'blob1',
'block %d' % (i),
str(i))
self.assertIsNone(resp)
# Assert
def test_put_block_list(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', '')
self.bc.put_block(self.container_name, 'blob1', 'AAA', '1')
self.bc.put_block(self.container_name, 'blob1', 'BBB', '2')
self.bc.put_block(self.container_name, 'blob1', 'CCC', '3')
# Act
resp = self.bc.put_block_list(self.container_name, 'blob1', ['1', '2', '3'])
# Assert
self.assertIsNone(resp)
def test_get_block_list_no_blocks(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', '')
# Act
block_list = self.bc.get_block_list(self.container_name, 'blob1', None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEquals(len(block_list.uncommitted_blocks), 0)
self.assertEquals(len(block_list.committed_blocks), 0)
def test_get_block_list_uncommitted_blocks(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', '')
self.bc.put_block(self.container_name, 'blob1', 'AAA', '1')
self.bc.put_block(self.container_name, 'blob1', 'BBB', '2')
self.bc.put_block(self.container_name, 'blob1', 'CCC', '3')
# Act
block_list = self.bc.get_block_list(self.container_name, 'blob1', None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEquals(len(block_list.uncommitted_blocks), 3)
self.assertEquals(len(block_list.committed_blocks), 0)
def test_get_block_list_committed_blocks(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', '')
self.bc.put_block(self.container_name, 'blob1', 'AAA', '1')
self.bc.put_block(self.container_name, 'blob1', 'BBB', '2')
self.bc.put_block(self.container_name, 'blob1', 'CCC', '3')
self.bc.put_block_list(self.container_name, 'blob1', ['1', '2', '3'])
# Act
block_list = self.bc.get_block_list(self.container_name, 'blob1', None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEquals(len(block_list.uncommitted_blocks), 0)
self.assertEquals(len(block_list.committed_blocks), 3)
def test_put_page_update(self):
# Arrange
self._create_container_and_page_blob(self.container_name, 'blob1', 1024)
# Act
data = 'abcdefghijklmnop' * 32
resp = self.bc.put_page(self.container_name, 'blob1', data, 'bytes=0-511', 'update')
# Assert
self.assertIsNone(resp)
def test_put_page_clear(self):
# Arrange
self._create_container_and_page_blob(self.container_name, 'blob1', 1024)
# Act
resp = self.bc.put_page(self.container_name, 'blob1', '', 'bytes=0-511', 'clear')
# Assert
self.assertIsNone(resp)
def test_get_page_ranges_no_pages(self):
# Arrange
self._create_container_and_page_blob(self.container_name, 'blob1', 1024)
# Act
ranges = self.bc.get_page_ranges(self.container_name, 'blob1')
# Assert
self.assertIsNotNone(ranges)
self.assertIsInstance(ranges, PageList)
self.assertEquals(len(ranges.page_ranges), 0)
def test_get_page_ranges_2_pages(self):
# Arrange
self._create_container_and_page_blob(self.container_name, 'blob1', 2048)
data = 'abcdefghijklmnop' * 32
resp1 = self.bc.put_page(self.container_name, 'blob1', data, 'bytes=0-511', 'update')
resp2 = self.bc.put_page(self.container_name, 'blob1', data, 'bytes=1024-1535', 'update')
# Act
ranges = self.bc.get_page_ranges(self.container_name, 'blob1')
# Assert
self.assertIsNotNone(ranges)
self.assertIsInstance(ranges, PageList)
self.assertEquals(len(ranges.page_ranges), 2)
self.assertEquals(ranges.page_ranges[0].start, 0)
self.assertEquals(ranges.page_ranges[0].end, 511)
self.assertEquals(ranges.page_ranges[1].start, 1024)
self.assertEquals(ranges.page_ranges[1].end, 1535)
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
self.assertIsInstance(request, HTTPRequest)
for header in request.headers:
self.assertIsInstance(header, tuple)
for item in header:
self.assertIsInstance(item, (str, unicode, type(None)))
self.assertIsInstance(request.host, (str, unicode))
self.assertIsInstance(request.method, (str, unicode))
self.assertIsInstance(request.path, (str, unicode))
self.assertIsInstance(request.query, list)
self.assertIsInstance(request.body, (str, unicode))
response = next(request)
self.assertIsInstance(response, HTTPResponse)
self.assertIsInstance(response.body, (str, type(None)))
self.assertIsInstance(response.headers, list)
for header in response.headers:
self.assertIsInstance(header, tuple)
for item in header:
self.assertIsInstance(item, (str, unicode))
self.assertIsInstance(response.status, int)
return response
bc = self.bc.with_filter(my_filter)
bc.create_container(self.container_name + '0', None, None, False)
self.assertTrue(called)
del called[:]
bc.delete_container(self.container_name + '0')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
bc = self.bc.with_filter(filter_a).with_filter(filter_b)
bc.create_container(self.container_name + '1', None, None, False)
self.assertEqual(called, ['b', 'a'])
bc.delete_container(self.container_name + '1')
self.assertEqual(called, ['b', 'a', 'b', 'a'])
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,336 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.storage.queueservice import *
from azuretest.util import *
import unittest
import time
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
#------------------------------------------------------------------------------
class QueueServiceTest(unittest.TestCase):
def setUp(self):
self.queue_client = QueueService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
# TODO: it may be overkill to use the machine name from
# getUniqueTestRunID, current time may be unique enough
__uid = getUniqueTestRunID()
queue_base_name = u'%s' % (__uid)
self.test_queues = []
self.creatable_queues = []
for i in range(10):
self.test_queues.append(TEST_QUEUE_PREFIX + getUniqueNameBasedOnCurrentTime(queue_base_name))
for i in range(4):
self.creatable_queues.append('mycreatablequeue' + getUniqueNameBasedOnCurrentTime(queue_base_name))
for queue_name in self.test_queues:
self.queue_client.create_queue(queue_name)
def tearDown(self):
self.cleanup()
return super(QueueServiceTest, self).tearDown()
def cleanup(self):
for queue_name in self.test_queues:
try:
self.queue_client.delete_queue(queue_name)
except:
pass
for queue_name in self.creatable_queues:
try:
self.queue_client.delete_queue(queue_name)
except:
pass
def test_get_service_properties(self):
#This api doesn't apply to local storage
if self.queue_client.use_local_storage:
return
#Action
properties = self.queue_client.get_queue_service_properties()
#Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.metrics)
self.assertIsNotNone(properties.metrics.retention_policy)
self.assertIsNotNone(properties.metrics.version)
def test_set_service_properties(self):
#This api doesn't apply to local storage
if self.queue_client.use_local_storage:
return
#Action
queue_properties = self.queue_client.get_queue_service_properties()
queue_properties.logging.read=True
self.queue_client.set_queue_service_properties(queue_properties)
properties = self.queue_client.get_queue_service_properties()
#Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.metrics)
self.assertIsNotNone(properties.metrics.retention_policy)
self.assertIsNotNone(properties.metrics.version)
self.assertTrue(properties.logging.read)
def test_create_queue(self):
#Action
self.queue_client.create_queue(self.creatable_queues[0])
result = self.queue_client.get_queue_metadata(self.creatable_queues[0])
self.queue_client.delete_queue(self.creatable_queues[0])
#Asserts
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
def test_create_queue_with_options(self):
#Action
self.queue_client.create_queue(self.creatable_queues[1], x_ms_meta_name_values = {'foo':'test', 'bar':'blah'})
result = self.queue_client.get_queue_metadata(self.creatable_queues[1])
#Asserts
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
self.assertEqual('test', result['x-ms-meta-foo'])
self.assertEqual('blah', result['x-ms-meta-bar'])
def test_list_queues(self):
#Action
queues = self.queue_client.list_queues()
#Asserts
self.assertIsNotNone(queues)
self.assertEqual('', queues.marker)
self.assertEqual(0, queues.max_results)
self.assertTrue(len(self.test_queues) <= len(queues))
def test_list_queues_with_options(self):
#Action
queues_1 = self.queue_client.list_queues(prefix=TEST_QUEUE_PREFIX, maxresults=3)
queues_2 = self.queue_client.list_queues(prefix=TEST_QUEUE_PREFIX, marker=queues_1.next_marker, include='metadata')
#Asserts
self.assertIsNotNone(queues_1)
self.assertEqual(3, len(queues_1))
self.assertEqual(3, queues_1.max_results)
self.assertEqual('', queues_1.marker)
self.assertIsNotNone(queues_1[0])
self.assertIsNone(queues_1[0].metadata)
self.assertNotEqual('', queues_1[0].name)
self.assertNotEqual('', queues_1[0].url)
#Asserts
self.assertIsNotNone(queues_2)
self.assertTrue(len(self.test_queues) -3 <= len(queues_2))
self.assertEqual(0, queues_2.max_results)
self.assertEqual(queues_1.next_marker, queues_2.marker)
self.assertIsNotNone(queues_2[0])
self.assertIsNotNone(queues_2[0].metadata)
self.assertNotEqual('', queues_2[0].name)
self.assertNotEqual('', queues_2[0].url)
def test_set_queue_metadata(self):
#Action
self.queue_client.create_queue(self.creatable_queues[2])
self.queue_client.set_queue_metadata(self.creatable_queues[2], x_ms_meta_name_values={'foo':'test', 'bar':'blah'})
result = self.queue_client.get_queue_metadata(self.creatable_queues[2])
self.queue_client.delete_queue(self.creatable_queues[2])
#Asserts
self.assertIsNotNone(result)
self.assertEqual('0', result['x-ms-approximate-messages-count'])
self.assertEqual('test', result['x-ms-meta-foo'])
self.assertEqual('blah', result['x-ms-meta-bar'])
def test_put_message(self):
#Action. No exception means pass. No asserts needed.
self.queue_client.put_message(self.test_queues[0], 'message1')
self.queue_client.put_message(self.test_queues[0], 'message2')
self.queue_client.put_message(self.test_queues[0], 'message3')
self.queue_client.put_message(self.test_queues[0], 'message4')
def test_get_messages(self):
#Action
self.queue_client.put_message(self.test_queues[1], 'message1')
self.queue_client.put_message(self.test_queues[1], 'message2')
self.queue_client.put_message(self.test_queues[1], 'message3')
self.queue_client.put_message(self.test_queues[1], 'message4')
result = self.queue_client.get_messages(self.test_queues[1])
#Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_get_messages_with_options(self):
#Action
self.queue_client.put_message(self.test_queues[2], 'message1')
self.queue_client.put_message(self.test_queues[2], 'message2')
self.queue_client.put_message(self.test_queues[2], 'message3')
self.queue_client.put_message(self.test_queues[2], 'message4')
result = self.queue_client.get_messages(self.test_queues[2], numofmessages=4, visibilitytimeout=20)
#Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_peek_messages(self):
#Action
self.queue_client.put_message(self.test_queues[3], 'message1')
self.queue_client.put_message(self.test_queues[3], 'message2')
self.queue_client.put_message(self.test_queues[3], 'message3')
self.queue_client.put_message(self.test_queues[3], 'message4')
result = self.queue_client.peek_messages(self.test_queues[3])
#Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
def test_peek_messages_with_options(self):
#Action
self.queue_client.put_message(self.test_queues[4], 'message1')
self.queue_client.put_message(self.test_queues[4], 'message2')
self.queue_client.put_message(self.test_queues[4], 'message3')
self.queue_client.put_message(self.test_queues[4], 'message4')
result = self.queue_client.peek_messages(self.test_queues[4], numofmessages=4)
#Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
def test_clear_messages(self):
#Action
self.queue_client.put_message(self.test_queues[5], 'message1')
self.queue_client.put_message(self.test_queues[5], 'message2')
self.queue_client.put_message(self.test_queues[5], 'message3')
self.queue_client.put_message(self.test_queues[5], 'message4')
self.queue_client.clear_messages(self.test_queues[5])
result = self.queue_client.peek_messages(self.test_queues[5])
#Asserts
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
def test_delete_message(self):
#Action
self.queue_client.put_message(self.test_queues[6], 'message1')
self.queue_client.put_message(self.test_queues[6], 'message2')
self.queue_client.put_message(self.test_queues[6], 'message3')
self.queue_client.put_message(self.test_queues[6], 'message4')
result = self.queue_client.get_messages(self.test_queues[6])
self.queue_client.delete_message(self.test_queues[6], result[0].message_id, result[0].pop_receipt)
result2 = self.queue_client.get_messages(self.test_queues[6], numofmessages=32)
#Asserts
self.assertIsNotNone(result2)
self.assertEqual(3, len(result2))
def test_update_message(self):
#Action
self.queue_client.put_message(self.test_queues[7], 'message1')
list_result1 = self.queue_client.get_messages(self.test_queues[7])
self.queue_client.update_message(self.test_queues[7], list_result1[0].message_id, 'new text', list_result1[0].pop_receipt, visibilitytimeout=0)
list_result2 = self.queue_client.get_messages(self.test_queues[7])
#Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('new text', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
qc = self.queue_client.with_filter(my_filter)
qc.put_message(self.test_queues[7], 'message1')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
qc = self.queue_client.with_filter(filter_a).with_filter(filter_b)
qc.put_message(self.test_queues[7], 'message1')
self.assertEqual(called, ['b', 'a'])
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,867 @@
#------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License,
# Version 2.0. A copy of the license can be found in the License.html file at
# the root of this distribution. If you cannot locate the Apache License,
# Version 2.0, please send an email to vspython@microsoft.com. By using this
# source code in any fashion, you are agreeing to be bound by the terms of the
# Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#------------------------------------------------------------------------------
from azure import *
from azure.servicebus import *
from azuretest.util import *
import unittest
#------------------------------------------------------------------------------
class ServiceBusTest(unittest.TestCase):
def setUp(self):
self.sbs = ServiceBusService(credentials.getServiceBusNamespace(),
credentials.getServiceBusKey(),
'owner')
# TODO: it may be overkill to use the machine name from
# getUniqueTestRunID, current time may be unique enough
__uid = getUniqueTestRunID()
queue_base_name = u'mytestqueue%s' % (__uid)
topic_base_name = u'mytesttopic%s' % (__uid)
self.queue_name = getUniqueNameBasedOnCurrentTime(queue_base_name)
self.topic_name = getUniqueNameBasedOnCurrentTime(topic_base_name)
def tearDown(self):
self.cleanup()
return super(ServiceBusTest, self).tearDown()
def cleanup(self):
try:
self.sbs.delete_queue(self.queue_name)
except: pass
try:
self.sbs.delete_topic(self.topic_name)
except: pass
#--Helpers-----------------------------------------------------------------
# TODO: move this function out of here so other tests can use it
# TODO: find out how to import/use safe_repr instead of repr
def assertNamedItemInContainer(self, container, item_name, msg=None):
for item in container:
if item.name == item_name:
return
standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
self.fail(self._formatMessage(msg, standardMsg))
# TODO: move this function out of here so other tests can use it
# TODO: find out how to import/use safe_repr instead of repr
def assertNamedItemNotInContainer(self, container, item_name, msg=None):
for item in container:
if item.name == item_name:
standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def _create_queue(self, queue_name):
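# The trailing True corresponds to the fail_on_exist flag (compare the *_fail_on_exist
# tests below), so setup fails loudly if the queue already exists.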
self.sbs.create_queue(queue_name, None, True)
def _create_queue_and_send_msg(self, queue_name, msg):
self._create_queue(queue_name)
self.sbs.send_queue_message(queue_name, msg)
def _create_topic(self, topic_name):
self.sbs.create_topic(topic_name, None, True)
def _create_topic_and_subscription(self, topic_name, subscription_name):
self._create_topic(topic_name)
self._create_subscription(topic_name, subscription_name)
def _create_subscription(self, topic_name, subscription_name):
self.sbs.create_subscription(topic_name, subscription_name, None, True)
#--Test cases for queues --------------------------------------------------
def test_create_queue_no_options(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name)
# Assert
self.assertTrue(created)
def test_create_queue_no_options_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name, None, True)
# Assert
self.assertTrue(created)
def test_create_queue_with_options(self):
# Arrange
# Act
queue_options = Queue()
queue_options.max_size_in_megabytes = 5120
queue_options.default_message_time_to_live = 'PT1M'
created = self.sbs.create_queue(self.queue_name, queue_options)
# Assert
self.assertTrue(created)
def test_create_queue_with_already_existing_queue(self):
# Arrange
# Act
created1 = self.sbs.create_queue(self.queue_name)
created2 = self.sbs.create_queue(self.queue_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_queue_with_already_existing_queue_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name)
with self.assertRaises(WindowsAzureError):
self.sbs.create_queue(self.queue_name, None, True)
# Assert
self.assertTrue(created)
def test_get_queue_with_existing_queue(self):
# Arrange
self._create_queue(self.queue_name)
# Act
queue = self.sbs.get_queue(self.queue_name)
# Assert
self.assertIsNotNone(queue)
self.assertEquals(queue.name, self.queue_name)
def test_get_queue_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
resp = self.sbs.get_queue(self.queue_name)
# Assert
def test_list_queues(self):
# Arrange
self._create_queue(self.queue_name)
# Act
queues = self.sbs.list_queues()
for queue in queues:
name = queue.name
# Assert
self.assertIsNotNone(queues)
self.assertNamedItemInContainer(queues, self.queue_name)
def test_delete_queue_with_existing_queue(self):
# Arrange
self._create_queue(self.queue_name)
# Act
deleted = self.sbs.delete_queue(self.queue_name)
# Assert
self.assertTrue(deleted)
queues = self.sbs.list_queues()
self.assertNamedItemNotInContainer(queues, self.queue_name)
def test_delete_queue_with_existing_queue_fail_not_exist(self):
# Arrange
self._create_queue(self.queue_name)
# Act
deleted = self.sbs.delete_queue(self.queue_name, True)
# Assert
self.assertTrue(deleted)
queues = self.sbs.list_queues()
self.assertNamedItemNotInContainer(queues, self.queue_name)
def test_delete_queue_with_non_existing_queue(self):
# Arrange
# Act
deleted = self.sbs.delete_queue(self.queue_name)
# Assert
self.assertFalse(deleted)
def test_delete_queue_with_non_existing_queue_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.delete_queue(self.queue_name, True)
# Assert
def test_send_queue_message(self):
# Arrange
self._create_queue(self.queue_name)
sent_msg = Message('send message')
# Act
self.sbs.send_queue_message(self.queue_name, sent_msg)
# Assert
def test_receive_queue_message_read_delete_mode(self):
# Arrange
sent_msg = Message('receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_queue_message_read_delete_mode_throws_on_delete(self):
# Arrange
sent_msg = Message('receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
with self.assertRaises(WindowsAzureError):
received_msg.delete()
# Assert
def test_receive_queue_message_read_delete_mode_throws_on_unlock(self):
# Arrange
sent_msg = Message('receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
with self.assertRaises(WindowsAzureError):
received_msg.unlock()
# Assert
def test_receive_queue_message_peek_lock_mode(self):
# Arrange
sent_msg = Message('peek lock message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_queue_message_delete(self):
# Arrange
sent_msg = Message('peek lock message delete')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_queue_message_unlock(self):
# Arrange
sent_msg = Message('peek lock message unlock')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_msg.unlock()
# Assert
received_again_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_again_msg.delete()
self.assertIsNotNone(received_msg)
self.assertIsNotNone(received_again_msg)
self.assertEquals(sent_msg.body, received_msg.body)
self.assertEquals(received_again_msg.body, received_msg.body)
def test_send_queue_message_with_custom_message_type(self):
# Arrange
self._create_queue(self.queue_name)
# Act
sent_msg = Message('<text>peek lock message custom message type</text>', type='text/xml')
self.sbs.send_queue_message(self.queue_name, sent_msg)
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals('text/xml', received_msg.type)
def test_send_queue_message_with_custom_message_properties(self):
# Arrange
self._create_queue(self.queue_name)
# Act
sent_msg = Message('message with properties', custom_properties={'hello':'world', 'foo':42})
self.sbs.send_queue_message(self.queue_name, sent_msg)
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(received_msg.custom_properties['hello'], 'world')
self.assertEquals(received_msg.custom_properties['foo'], '42') # TODO: note that the integer became a string
#--Test cases for topics/subscriptions ------------------------------------
def test_create_topic_no_options(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name)
# Assert
self.assertTrue(created)
def test_create_topic_no_options_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name, None, True)
# Assert
self.assertTrue(created)
def test_create_topic_with_options(self):
# Arrange
# Act
topic_options = Topic()
topic_options.max_size_in_megabytes = '5120'
topic_options.default_message_time_to_live = 'PT1M'
created = self.sbs.create_topic(self.topic_name, topic_options)
# Assert
self.assertTrue(created)
def test_create_topic_with_already_existing_topic(self):
# Arrange
# Act
created1 = self.sbs.create_topic(self.topic_name)
created2 = self.sbs.create_topic(self.topic_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_topic_with_already_existing_topic_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name)
with self.assertRaises(WindowsAzureError):
self.sbs.create_topic(self.topic_name, None, True)
# Assert
self.assertTrue(created)
def test_get_topic_with_existing_topic(self):
# Arrange
self._create_topic(self.topic_name)
# Act
topic = self.sbs.get_topic(self.topic_name)
# Assert
self.assertIsNotNone(topic)
self.assertEquals(topic.name, self.topic_name)
def test_get_topic_with_non_existing_topic(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.get_topic(self.topic_name)
# Assert
def test_list_topics(self):
# Arrange
self._create_topic(self.topic_name)
# Act
topics = self.sbs.list_topics()
for topic in topics:
name = topic.name
# Assert
self.assertIsNotNone(topics)
self.assertNamedItemInContainer(topics, self.topic_name)
def test_delete_topic_with_existing_topic(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_topic(self.topic_name)
# Assert
self.assertTrue(deleted)
topics = self.sbs.list_topics()
self.assertNamedItemNotInContainer(topics, self.topic_name)
def test_delete_topic_with_existing_topic_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_topic(self.topic_name, True)
# Assert
self.assertTrue(deleted)
topics = self.sbs.list_topics()
self.assertNamedItemNotInContainer(topics, self.topic_name)
def test_delete_topic_with_non_existing_topic(self):
# Arrange
# Act
deleted = self.sbs.delete_topic(self.topic_name)
# Assert
self.assertFalse(deleted)
def test_delete_topic_with_non_existing_topic_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.delete_topic(self.topic_name, True)
# Assert
def test_create_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(self.topic_name, 'MySubscription')
# Assert
self.assertTrue(created)
def test_create_subscription_fail_on_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(self.topic_name, 'MySubscription', None, True)
# Assert
self.assertTrue(created)
def test_create_subscription_with_already_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created1 = self.sbs.create_subscription(self.topic_name, 'MySubscription')
created2 = self.sbs.create_subscription(self.topic_name, 'MySubscription')
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_subscription_with_already_existing_subscription_fail_on_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(self.topic_name, 'MySubscription')
with self.assertRaises(WindowsAzureError):
self.sbs.create_subscription(self.topic_name, 'MySubscription', None, True)
# Assert
self.assertTrue(created)
def test_list_subscriptions(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription2')
# Act
subscriptions = self.sbs.list_subscriptions(self.topic_name)
# Assert
self.assertIsNotNone(subscriptions)
self.assertEquals(len(subscriptions), 1)
self.assertEquals(subscriptions[0].name, 'MySubscription2')
def test_get_subscription_with_existing_subscription(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
# Act
subscription = self.sbs.get_subscription(self.topic_name, 'MySubscription3')
# Assert
self.assertIsNotNone(subscription)
self.assertEquals(subscription.name, 'MySubscription3')
def test_get_subscription_with_non_existing_subscription(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.get_subscription(self.topic_name, 'MySubscription4')
# Assert
def test_delete_subscription_with_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
self._create_subscription(self.topic_name, 'MySubscription4')
self._create_subscription(self.topic_name, 'MySubscription5')
# Act
deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription4')
# Assert
self.assertTrue(deleted)
subscriptions = self.sbs.list_subscriptions(self.topic_name)
self.assertIsNotNone(subscriptions)
self.assertEquals(len(subscriptions), 1)
self.assertEquals(subscriptions[0].name, 'MySubscription5')
def test_delete_subscription_with_existing_subscription_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
self._create_subscription(self.topic_name, 'MySubscription4')
self._create_subscription(self.topic_name, 'MySubscription5')
# Act
deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription4', True)
# Assert
self.assertTrue(deleted)
subscriptions = self.sbs.list_subscriptions(self.topic_name)
self.assertIsNotNone(subscriptions)
self.assertEquals(len(subscriptions), 1)
self.assertEquals(subscriptions[0].name, 'MySubscription5')
def test_delete_subscription_with_non_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription')
# Assert
self.assertFalse(deleted)
def test_delete_subscription_with_non_existing_subscription_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.delete_subscription(self.topic_name, 'MySubscription', True)
# Assert
def test_create_rule_no_options(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
# Assert
self.assertTrue(created)
def test_create_rule_no_options_fail_on_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', None, True)
# Assert
self.assertTrue(created)
def test_create_rule_with_already_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created1 = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
created2 = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_rule_with_already_existing_rule_fail_on_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
with self.assertRaises(WindowsAzureError):
self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', None, True)
# Assert
self.assertTrue(created)
def test_create_rule_with_options(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.filter_type = 'SqlFilter'
rule1.filter_expression = 'foo > 40'
created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
def test_list_rules(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule2')
# Act
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
# Assert
self.assertEquals(len(rules), 2)
def test_get_rule_with_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule = self.sbs.get_rule(self.topic_name, 'MySubscription', '$Default')
# Assert
self.assertIsNotNone(rule)
self.assertEquals(rule.name, '$Default')
def test_get_rule_with_non_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.get_rule(self.topic_name, 'MySubscription', 'NonExistingRule')
# Assert
def test_delete_rule_with_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule3')
resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule4')
# Act
deleted1 = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'MyRule4')
deleted2 = self.sbs.delete_rule(self.topic_name, 'MySubscription', '$Default')
# Assert
self.assertTrue(deleted1)
self.assertTrue(deleted2)
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
self.assertIsNotNone(rules)
self.assertEquals(len(rules), 1)
self.assertEquals(rules[0].name, 'MyRule3')
def test_delete_rule_with_existing_rule_fail_not_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule3')
resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule4')
# Act
deleted1 = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'MyRule4', True)
deleted2 = self.sbs.delete_rule(self.topic_name, 'MySubscription', '$Default', True)
# Assert
self.assertTrue(deleted1)
self.assertTrue(deleted2)
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
self.assertIsNotNone(rules)
self.assertEquals(len(rules), 1)
self.assertEquals(rules[0].name, 'MyRule3')
def test_delete_rule_with_non_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
deleted = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'NonExistingRule')
# Assert
self.assertFalse(deleted)
def test_delete_rule_with_non_existing_rule_fail_not_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(WindowsAzureError):
self.sbs.delete_rule(self.topic_name, 'MySubscription', 'NonExistingRule', True)
# Assert
def test_send_topic_message(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
# Act
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Assert
def test_receive_subscription_message_read_delete_mode(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_subscription_message_read_delete_mode_throws_on_delete(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
with self.assertRaises(WindowsAzureError):
received_msg.delete()
# Assert
def test_receive_subscription_message_read_delete_mode_throws_on_unlock(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
with self.assertRaises(WindowsAzureError):
received_msg.unlock()
# Assert
def test_receive_subscription_message_peek_lock_mode(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True, 5)
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_subscription_message_delete(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(sent_msg.body, received_msg.body)
def test_receive_subscription_message_unlock(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message('subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True)
received_msg.unlock()
# Assert
received_again_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True)
received_again_msg.delete()
self.assertIsNotNone(received_msg)
self.assertIsNotNone(received_again_msg)
self.assertEquals(sent_msg.body, received_msg.body)
self.assertEquals(received_again_msg.body, received_msg.body)
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
sbs = self.sbs.with_filter(my_filter)
sbs.create_topic(self.topic_name + '0', None, True)
self.assertTrue(called)
del called[:]
sbs.delete_topic(self.topic_name + '0')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
sbs = self.sbs.with_filter(filter_a).with_filter(filter_b)
sbs.create_topic(self.topic_name + '0', None, True)
self.assertEqual(called, ['b', 'a'])
sbs.delete_topic(self.topic_name + '0')
self.assertEqual(called, ['b', 'a', 'b', 'a'])
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()

View file

@ -0,0 +1,658 @@
#-------------------------------------------------------------------------
# Copyright 2011 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.storage.tableservice import *
from azure.storage import EntityProperty, Entity, StorageServiceProperties
from azure import WindowsAzureError
from azuretest.util import (credentials,
getUniqueTestRunID,
STATUS_OK,
STATUS_CREATED,
STATUS_ACCEPTED,
STATUS_NO_CONTENT,
getUniqueNameBasedOnCurrentTime)
import unittest
import time
from datetime import datetime
#------------------------------------------------------------------------------
__uid = getUniqueTestRunID()
TABLE_TO_DELETE = 'mytesttabletodelete%s' % (__uid)
TABLE_NO_DELETE = 'mytesttablenodelete%s' % (__uid)
ENTITY_TO_DELETE = 'mytestentitytodelete%s' % (__uid)
ENTITY_NO_DELETE = 'mytestentitynodelete%s' % (__uid)
BATCH_TABLE = 'mytestbatchtable%s' % (__uid)
FILTER_TABLE = 'mytestfiltertable%s' % (__uid)
#------------------------------------------------------------------------------
class StorageTest(unittest.TestCase):
'''
TODO:
- comprehensive, positive test cases for all table client methods
- comprehensive, negative test cases all table client methods
- missing coverage for begin_batch
- missing coverage for cancel_batch
- missing coverage for commit_batch
- get_table_service_properties busted
- set_table_service_properties busted
'''
def setUp(self):
self.tc = TableService(account_name=credentials.getStorageServicesName().encode('ascii', 'ignore'),
account_key=credentials.getStorageServicesKey().encode('ascii', 'ignore'))
__uid = getUniqueTestRunID()
test_table_base_name = u'testtable%s' % (__uid)
self.test_table = getUniqueNameBasedOnCurrentTime(test_table_base_name)
self.tc.create_table(self.test_table)
#time.sleep(10)
def tearDown(self):
self.cleanup()
return super(StorageTest, self).tearDown()
def cleanup(self):
for cont in [TABLE_NO_DELETE, TABLE_TO_DELETE]:
try: self.tc.delete_table(cont)
except: pass
self.tc.delete_table(self.test_table)
def test_sanity(self):
self.sanity_create_table()
time.sleep(10)
self.sanity_query_tables()
self.sanity_delete_table()
self.sanity_insert_entity()
self.sanity_get_entity()
self.sanity_query_entities()
self.sanity_update_entity()
self.sanity_insert_or_merge_entity()
self.sanity_insert_or_replace_entity()
self.sanity_merge_entity()
self.sanity_delete_entity()
self.sanity_begin_batch()
self.sanity_commit_batch()
self.sanity_cancel_batch()
def test_sanity_get_set_table_service_properties(self):
table_properties = self.tc.get_table_service_properties()
self.tc.set_table_service_properties(table_properties)
tests = [('logging.delete', True),
('logging.delete', False),
('logging.read', True),
('logging.read', False),
('logging.write', True),
('logging.write', False),
]
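# For each dotted path, set the leaf attribute, push the properties to the service,
# read them back, and verify the value round-trips.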
for path, value in tests:
#print path
cur = table_properties
for component in path.split('.')[:-1]:
cur = getattr(cur, component)
last_attr = path.split('.')[-1]
setattr(cur, last_attr, value)
self.tc.set_table_service_properties(table_properties)
table_properties = self.tc.get_table_service_properties()
cur = table_properties
for component in path.split('.'):
cur = getattr(cur, component)
self.assertEquals(value, cur)
def test_table_service_retention_single_set(self):
table_properties = self.tc.get_table_service_properties()
table_properties.logging.retention_policy.enabled = False
table_properties.logging.retention_policy.days = 5
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.tc.set_table_service_properties,
table_properties)
table_properties = self.tc.get_table_service_properties()
table_properties.logging.retention_policy.days = None
table_properties.logging.retention_policy.enabled = True
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.tc.set_table_service_properties,
table_properties)
def test_table_service_set_both(self):
table_properties = self.tc.get_table_service_properties()
table_properties.logging.retention_policy.enabled = True
table_properties.logging.retention_policy.days = 5
self.tc.set_table_service_properties(table_properties)
table_properties = self.tc.get_table_service_properties()
self.assertEquals(True, table_properties.logging.retention_policy.enabled)
self.assertEquals(5, table_properties.logging.retention_policy.days)
#--Helpers-----------------------------------------------------------------
def sanity_create_table(self):
resp = self.tc.create_table(TABLE_TO_DELETE)
self.assertTrue(resp)
#self.assertEqual(resp.cache_control, u'no-cache')
resp = self.tc.create_table(TABLE_NO_DELETE)
self.assertTrue(resp)
#self.assertEqual(resp.cache_control, u'no-cache')
def sanity_query_tables(self):
resp = self.tc.query_tables()
self.assertEqual(type(resp), list)
tableNames = [x.name for x in resp]
self.assertGreaterEqual(len(tableNames), 2)
self.assertIn(TABLE_NO_DELETE, tableNames)
self.assertIn(TABLE_TO_DELETE, tableNames)
def sanity_delete_table(self):
resp = self.tc.delete_table(TABLE_TO_DELETE)
self.assertTrue(resp)
def sanity_insert_entity(self):
resp = self.tc.insert_entity(TABLE_NO_DELETE, {'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age':39,
'sex':'male',
'birthday':datetime(1973,10,4)})
self.assertEquals(resp, None)
entity = Entity()
entity.PartitionKey = 'Lastname'
entity.RowKey = 'Firstname1'
entity.age = 39
entity.Birthday = EntityProperty('Edm.Int64', 20)
resp = self.tc.insert_entity(TABLE_NO_DELETE, entity)
self.assertEquals(resp, None)
def sanity_get_entity(self):
ln = u'Lastname'
fn1 = u'Firstname1'
resp = self.tc.get_entity(TABLE_NO_DELETE,
ln,
fn1,
'')
self.assertEquals(resp.PartitionKey, ln)
self.assertEquals(resp.RowKey, fn1)
self.assertEquals(resp.age, 39)
self.assertEquals(resp.Birthday, 20)
def sanity_query_entities(self):
resp = self.tc.query_entities(TABLE_NO_DELETE, '', '')
self.assertEquals(len(resp), 2)
self.assertEquals(resp[0].birthday, datetime(1973, 10, 4))
self.assertEquals(resp[1].Birthday, 20)
def sanity_update_entity(self):
ln = u'Lastname'
fn = u'Firstname'
resp = self.tc.update_entity(TABLE_NO_DELETE,
ln,
fn,
{'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age':21,
'sex':'female',
'birthday':datetime(1991,10,4)})
self.assertEquals(resp, None)
resp = self.tc.get_entity(TABLE_NO_DELETE,
ln,
fn,
'')
self.assertEquals(resp.PartitionKey, ln)
self.assertEquals(resp.RowKey, fn)
self.assertEquals(resp.age, 21)
self.assertEquals(resp.sex, u'female')
self.assertEquals(resp.birthday, datetime(1991, 10, 4))
def sanity_insert_or_merge_entity(self):
ln = u'Lastname'
fn = u'Firstname'
resp = self.tc.insert_or_merge_entity(TABLE_NO_DELETE,
ln,
fn,
{'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age': u'abc', #changed type
'sex':'male', #changed value
'birthday':datetime(1991,10,4),
'sign' : 'aquarius' #new
})
self.assertEquals(resp, None)
resp = self.tc.get_entity(TABLE_NO_DELETE,
ln,
fn,
'')
self.assertEquals(resp.PartitionKey, ln)
self.assertEquals(resp.RowKey, fn)
self.assertEquals(resp.age, u'abc')
self.assertEquals(resp.sex, u'male')
self.assertEquals(resp.birthday, datetime(1991, 10, 4))
self.assertEquals(resp.sign, u'aquarius')
def sanity_insert_or_replace_entity(self):
ln = u'Lastname'
fn = u'Firstname'
resp = self.tc.insert_or_replace_entity(TABLE_NO_DELETE,
ln,
fn,
{'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age':1,
'sex':'male'})
self.assertEquals(resp, None)
resp = self.tc.get_entity(TABLE_NO_DELETE,
ln,
fn,
'')
self.assertEquals(resp.PartitionKey, ln)
self.assertEquals(resp.RowKey, fn)
self.assertEquals(resp.age, 1)
self.assertEquals(resp.sex, u'male')
self.assertFalse(hasattr(resp, "birthday"))
self.assertFalse(hasattr(resp, "sign"))
def sanity_merge_entity(self):
ln = u'Lastname'
fn = u'Firstname'
resp = self.tc.merge_entity(TABLE_NO_DELETE,
ln,
fn,
{'PartitionKey':'Lastname',
'RowKey':'Firstname',
'sex':'female',
'fact': 'nice person'})
self.assertEquals(resp, None)
resp = self.tc.get_entity(TABLE_NO_DELETE,
ln,
fn,
'')
self.assertEquals(resp.PartitionKey, ln)
self.assertEquals(resp.RowKey, fn)
self.assertEquals(resp.age, 1)
self.assertEquals(resp.sex, u'female')
self.assertEquals(resp.fact, u'nice person')
self.assertFalse(hasattr(resp, "birthday"))
def sanity_delete_entity(self):
ln = u'Lastname'
fn = u'Firstname'
resp = self.tc.delete_entity(TABLE_NO_DELETE,
ln,
fn)
self.assertEquals(resp, None)
self.assertRaises(WindowsAzureError,
lambda: self.tc.get_entity(TABLE_NO_DELETE, ln, fn, ''))
def test_batch_partition_key(self):
tn = BATCH_TABLE + 'pk'
self.tc.create_table(tn)
try:
self.tc.begin_batch()
self.tc.insert_entity(tn, {'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age':39,
'sex':'male',
'birthday':datetime(1973,10,4)})
self.tc.insert_entity(tn, {'PartitionKey':'Lastname',
'RowKey':'Firstname2',
'age':39,
'sex':'male',
'birthday':datetime(1973,10,4)})
self.tc.commit_batch()
finally:
self.tc.delete_table(tn)
def test_sanity_batch(self):
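# NOTE: the early return below disables the remainder of this test; batch coverage is
# still listed as missing in the class docstring above.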
return
self.tc.create_table(BATCH_TABLE)
#resp = self.tc.begin_batch()
#self.assertEquals(resp, None)
resp = self.tc.insert_entity(BATCH_TABLE, {'PartitionKey':'Lastname',
'RowKey':'Firstname',
'age':39,
'sex':'male',
'birthday':datetime(1973,10,4)})
#resp = self.tc.insert_entity(BATCH_TABLE, {'PartitionKey':'Lastname',
# 'RowKey':'Firstname2',
# 'age':35,
# 'sex':'female',
# 'birthday':datetime(1977,12,5)})
#
resp = self.tc.query_entities(BATCH_TABLE, '', '')
self.assertEquals(len(resp), 0)
#self.tc.commit_batch()
return
resp = self.tc.query_entities(BATCH_TABLE, '', '')
self.assertEquals(len(resp), 2)
self.tc.delete_table(BATCH_TABLE)
def sanity_begin_batch(self):
resp = self.tc.begin_batch()
self.assertEquals(resp, None)
def sanity_commit_batch(self):
resp = self.tc.commit_batch()
self.assertEquals(resp, None)
def sanity_cancel_batch(self):
resp = self.tc.cancel_batch()
self.assertEquals(resp, None)
def test_query_tables_top(self):
table_id = getUniqueTestRunID()
for i in xrange(20):
self.tc.create_table(table_id + str(i))
res = self.tc.query_tables(top = 5)
self.assertEqual(len(res), 5)
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
tc = self.tc.with_filter(my_filter)
tc.create_table(FILTER_TABLE)
self.assertTrue(called)
del called[:]
tc.delete_table(FILTER_TABLE)
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
tc = self.tc.with_filter(filter_a).with_filter(filter_b)
tc.create_table(FILTER_TABLE + '0')
self.assertEqual(called, ['b', 'a'])
tc.delete_table(FILTER_TABLE + '0')
def test_batch_insert(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
self.tc.insert_entity(self.test_table, entity)
self.tc.commit_batch()
#Assert
result = self.tc.get_entity(self.test_table, '001', 'batch_insert')
self.assertIsNotNone(result)
def test_batch_update(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_update'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.insert_entity(self.test_table, entity)
entity = self.tc.get_entity(self.test_table, '001', 'batch_update')
self.assertEqual(3, entity.test3)
entity.test2 = 'value1'
self.tc.begin_batch()
self.tc.update_entity(self.test_table, '001', 'batch_update', entity)
self.tc.commit_batch()
entity = self.tc.get_entity(self.test_table, '001', 'batch_update')
#Assert
self.assertEqual('value1', entity.test2)
def test_batch_merge(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.insert_entity(self.test_table, entity)
entity = self.tc.get_entity(self.test_table, '001', 'batch_merge')
self.assertEqual(3, entity.test3)
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test2 = 'value1'
self.tc.begin_batch()
self.tc.merge_entity(self.test_table, '001', 'batch_merge', entity)
self.tc.commit_batch()
entity = self.tc.get_entity(self.test_table, '001', 'batch_merge')
#Assert
self.assertEqual('value1', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_replace(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
self.tc.insert_or_replace_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
entity = self.tc.get_entity(self.test_table, '001', 'batch_insert_replace')
#Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_merge(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
self.tc.insert_or_merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
entity = self.tc.get_entity(self.test_table, '001', 'batch_insert_merge')
#Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_delete(self):
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_delete'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.insert_entity(self.test_table, entity)
entity = self.tc.get_entity(self.test_table, '001', 'batch_delete')
#self.assertEqual(3, entity.test3)
self.tc.begin_batch()
self.tc.delete_entity(self.test_table, '001', 'batch_delete')
self.tc.commit_batch()
def test_batch_inserts(self):
#Act
entity = Entity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
self.tc.begin_batch()
for i in range(100):
entity.RowKey = str(i)
self.tc.insert_entity(self.test_table, entity)
self.tc.commit_batch()
entities = self.tc.query_entities(self.test_table, "PartitionKey eq 'batch_inserts'", '')
#Assert
self.assertIsNotNone(entities)
self.assertEqual(100, len(entities))
def test_batch_all_operations_together(self):
#Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.insert_entity(self.test_table, entity)
entity.RowKey = 'batch_all_operations_together-2'
self.tc.insert_entity(self.test_table, entity)
entity.RowKey = 'batch_all_operations_together-3'
self.tc.insert_entity(self.test_table, entity)
entity.RowKey = 'batch_all_operations_together-4'
self.tc.insert_entity(self.test_table, entity)
self.tc.begin_batch()
entity.RowKey = 'batch_all_operations_together'
self.tc.insert_entity(self.test_table, entity)
entity.RowKey = 'batch_all_operations_together-1'
self.tc.delete_entity(self.test_table, entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
self.tc.merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
self.tc.insert_or_replace_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-5'
self.tc.insert_or_merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
#Assert
entities = self.tc.query_entities(self.test_table, "PartitionKey eq '003'", '')
self.assertEqual(5, len(entities))
def test_batch_negative(self):
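# Two invalid batches are attempted below: (1) two operations against the same entity in
# one batch, and (2) operations spanning two partition keys; both are expected to raise,
# after which the batch is cancelled.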
#Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_negative_1'
entity.test = 1
self.tc.insert_entity(self.test_table, entity)
entity.test = 2
entity.RowKey = 'batch_negative_2'
self.tc.insert_entity(self.test_table, entity)
entity.test = 3
entity.RowKey = 'batch_negative_3'
self.tc.insert_entity(self.test_table, entity)
entity.test = -2
self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
try:
self.tc.begin_batch()
entity.RowKey = 'batch_negative_1'
self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
self.tc.merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
self.fail('Should raise WindowsAzureError exception')
self.tc.commit_batch()
except:
self.tc.cancel_batch()
pass
try:
self.tc.begin_batch()
entity.PartitionKey = '001'
entity.RowKey = 'batch_negative_1'
self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
entity.PartitionKey = '002'
entity.RowKey = 'batch_negative_1'
self.tc.insert_entity(self.test_table, entity)
self.fail('Should raise WindowsAzureError exception')
self.tc.commit_batch()
except:
self.tc.cancel_batch()
pass
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()

98
test/azuretest/util.py Normal file
View file

@ -0,0 +1,98 @@
#------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License,
# Version 2.0. A copy of the license can be found in the License.html file at
# the root of this distribution. If you cannot locate the Apache License,
# Version 2.0, please send an email to vspython@microsoft.com. By using this
# source code in any fashion, you are agreeing to be bound by the terms of the
# Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#------------------------------------------------------------------------------
import json
import os
import time
from exceptions import EnvironmentError
STATUS_OK = 200
STATUS_CREATED = 201
STATUS_ACCEPTED = 202
STATUS_NO_CONTENT = 204
STATUS_NOT_FOUND = 404
STATUS_CONFLICT = 409
DEFAULT_SLEEP_TIME = 60
DEFAULT_LEASE_TIME = 65
#------------------------------------------------------------------------------
class Credentials(object):
'''
Azure credentials needed to run Azure client tests.
'''
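# Based on the getters below, the JSON credentials file is expected to look
# roughly like this (key names are real, values are placeholders):
# {
#     "servicebusns": "<service-bus-namespace>",
#     "servicebuskey": "<service-bus-key>",
#     "storageservicesname": "<storage-account-name>",
#     "storageserviceskey": "<storage-account-key>",
#     "hostserviceid": "<hosted-service-id>"
# }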
def __init__(self):
credentialsFilename = "windowsazurecredentials.json"
tmpName = os.path.join(os.getcwd(), credentialsFilename)
if not os.path.exists(tmpName):
if os.environ.has_key("USERPROFILE"):
tmpName = os.path.join(os.environ["USERPROFILE"],
credentialsFilename)
elif os.environ.has_key("HOME"):
tmpName = os.path.join(os.environ["HOME"],
credentialsFilename)
if not os.path.exists(tmpName):
errMsg = "Cannot run Azure tests when the expected config file containing Azure credentials, '%s', does not exist!" % (tmpName)
raise EnvironmentError(errMsg)
with open(tmpName, "r") as f:
self.ns = json.load(f)
def getServiceBusKey(self):
return self.ns[u'servicebuskey']
def getServiceBusNamespace(self):
return self.ns[u'servicebusns']
def getStorageServicesKey(self):
return self.ns[u'storageserviceskey']
def getStorageServicesName(self):
return self.ns[u'storageservicesname']
def getHostServiceID(self):
return self.ns[u'hostserviceid']
credentials = Credentials()
def getUniqueTestRunID():
'''
Returns a unique identifier for this particular test run so
parallel test runs using the same Azure keys do not interfere
with one another.
TODO:
- not really unique now; just machine specific
'''
from os import environ
if environ.has_key("COMPUTERNAME"):
ret_val = environ["COMPUTERNAME"]
else:
import socket
ret_val = socket.gethostname()
for bad in ["-", "_", " ", "."]:
ret_val = ret_val.replace(bad, "")
ret_val = ret_val.lower().strip()
return ret_val
def getUniqueNameBasedOnCurrentTime(base_name):
'''
Returns base_name with the current time appended (punctuation stripped),
so that names created by different test runs using the same Azure keys
do not collide.
'''
cur_time = str(time.time())
for bad in ["-", "_", " ", "."]:
cur_time = cur_time.replace(bad, "")
cur_time = cur_time.lower().strip()
return base_name + cur_time

6
test/run.bash Normal file
View file

@ -0,0 +1,6 @@
#!/bin/bash
export PYTHONPATH=$PYTHONPATH:../src
echo "Running tests..."
python -m unittest discover -p "test_*.py"

53
test/run.bat Normal file
View file

@ -0,0 +1,53 @@
@echo OFF
SETLOCAL
REM----------------------------------------------------------------------------
REM Copyright (c) Microsoft Corporation.
REM
REM This source code is subject to terms and conditions of the Apache License,
REM Version 2.0. A copy of the license can be found in the License.html file at
REM the root of this distribution. If you cannot locate the Apache License,
REM Version 2.0, please send an email to vspython@microsoft.com. By using this
REM source code in any fashion, you are agreeing to be bound by the terms of the
REM Apache License, Version 2.0.
REM
REM You must not remove this notice, or any other, from this software.
REM----------------------------------------------------------------------------
cls
if "%PYTHONPATH%" == "" (
set PYTHONPATH=..\src
) else (
set PYTHONPATH=%PYTHONPATH%;..\src
)
echo Running tests...
%SystemDrive%\Python27\python.exe -m unittest discover -p "test_*.py"
set UNITTEST_EC=%ERRORLEVEL%
echo Finished running tests!
if exist "%SystemDrive%\Python27\Scripts\coverage.exe" (
goto :coverage
)
REM ---------------------------------------------------------------------------
if not exist "%SystemDrive%\Python27\Scripts\pip.exe" (
echo Cannot do a code coverage run when neither 'coverage' nor 'pip' are installed.
goto :exit_door
)
echo Installing 'coverage' package...
%SystemDrive%\Python27\Scripts\pip.exe install coverage==3.5.2
echo Finished installing 'coverage' package
REM ---------------------------------------------------------------------------
:coverage
echo Starting coverage run...
%SystemDrive%\Python27\Scripts\coverage.exe run -m unittest discover -p "test_*.py"
%SystemDrive%\Python27\Scripts\coverage.exe html
start %CD%\htmlcov\index.html
echo Finished coverage run!
REM ---------------------------------------------------------------------------
:exit_door
exit /B %UNITTEST_EC%