Renamed ncj module to batch_extensions

This commit is contained in:
annatisch 2017-02-24 13:43:17 -08:00
Commit fb8bf38c9a
41 changed files: 4336 additions and 0 deletions

10
HISTORY.rst Normal file
@@ -0,0 +1,10 @@
.. :changelog:
Release History
===============
0.1.1b1 (2017-01-17)
+++++++++++++++++++++
* Initial preview release.

1
MANIFEST.in Normal file
@@ -0,0 +1 @@
include *.rst

7
README.rst Normal file
@@ -0,0 +1,7 @@
Microsoft Azure CLI 'batch' Command Module
==========================================
This package provides custom extensions for the 'batch' command module,
i.e. 'az batch'.

7
azure/__init__.py Normal file
@@ -0,0 +1,7 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pkg_resources
pkg_resources.declare_namespace(__name__)

7
azure/cli/__init__.py Normal file
@@ -0,0 +1,7 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pkg_resources
pkg_resources.declare_namespace(__name__)

@@ -0,0 +1,7 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pkg_resources
pkg_resources.declare_namespace(__name__)

@@ -0,0 +1,12 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.batch_extensions._help # pylint: disable=unused-import
def load_params(_):
import azure.cli.command_modules.batch_extensions._params #pylint: disable=redefined-outer-name
def load_commands():
import azure.cli.command_modules.batch_extensions.commands #pylint: disable=redefined-outer-name

@@ -0,0 +1,26 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.mgmt.batch import BatchManagementClient
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batchauth
from azure.cli.core.commands.client_factory import get_mgmt_service_client
def account_mgmt_client_factory(kwargs):
return batch_client_factory(**kwargs).batch_account
def batch_client_factory(**_):
return get_mgmt_service_client(BatchManagementClient)
def batch_data_service_factory(kwargs):
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
account_endpoint = kwargs.pop('account_endpoint', None)
credentials = batchauth.SharedKeyCredentials(account_name, account_key)
return batch.BatchServiceClient(credentials, base_url=account_endpoint)
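For reference, a minimal sketch of how these factories might be driven; the account values below are hypothetical placeholders, not a real account:

# Hypothetical usage sketch - placeholder credentials only.
creds = {
    'account_name': 'mybatchaccount',
    'account_key': '<shared-key>',
    'account_endpoint': 'https://mybatchaccount.westus.batch.azure.com',
}
data_client = batch_data_service_factory(creds)  # pops the keys it needs
mgmt_client = account_mgmt_client_factory({})    # resolves via the CLI's ARM credentials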

@@ -0,0 +1,312 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import re
import glob
import hashlib
import datetime
import copy
from six.moves.urllib.parse import urlsplit # pylint: disable=import-error
from azure.mgmt.storage import StorageManagementClient
from azure.storage import CloudStorageAccount
from azure.storage.blob import BlobPermissions, BlockBlobService
from azure.cli.core.commands.client_factory import get_mgmt_service_client
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
def construct_sas_url(blob, uri):
    """Build a blob URL from the parsed container URL."""
    newuri = uri._replace(path='{}/{}'.format(uri.path, blob.name))
    return newuri.geturl()
def convert_blobs_to_resource_files(blobs, resource_properties):
"""Convert a list of blobs to a list of ResourceFiles"""
resource_files = []
if not blobs:
raise ValueError('No input data found with reference {}'.
format(resource_properties['source']['prefix']))
prefix = resource_properties.get('source', {}).get('prefix')
if len(blobs) == 1 and blobs[0]['filePath'] == prefix:
# Single file reference: filePath should be treated as file path
file_path = resource_properties.get('filePath', blobs[0]['filePath'])
resource_files.append({
'blobSource': blobs[0]['url'],
'filePath': file_path
})
else:
# Multiple file reference: filePath should be treated as a directory
base_file_path = ''
if 'filePath' in resource_properties:
base_file_path = '{}/'.format(
FileUtils.STRIP_PATH.sub('', resource_properties['filePath']))
for blob in blobs:
file_path = '{}{}'.format(base_file_path, blob['filePath'])
resource_files.append({
'blobSource': blob['url'],
'filePath': file_path
})
# Add filemode to every resourceFile
if 'fileMode' in resource_properties:
for f in resource_files:
f['fileMode'] = resource_properties['fileMode']
return resource_files
def resolve_file_paths(local_path):
"""Generate list of files to upload and the relative directory"""
local_path = FileUtils.STRIP_PATH.sub("", local_path)
files = []
if local_path.find('*') > -1:
# Supplied path is a pattern - relative directory will be the
# path up to the first wildcard
ref_dir = local_path.split('*')[0]
files = [f for f in glob.glob(local_path, recursive=True) if os.path.isfile(f)]
local_path = FileUtils.STRIP_PATH.sub("", ref_dir)
if not os.path.isdir(local_path):
local_path = os.path.dirname(local_path)
else:
if os.path.isdir(local_path):
# Supplied path is a directory
files = [os.path.join(local_path, f) for f in os.listdir(local_path)
if os.path.isfile(os.path.join(local_path, f))]
elif os.path.isfile(local_path):
# Supplied path is a file
files.append(local_path)
local_path = os.path.dirname(local_path)
return local_path, files
def generate_container_name(file_group):
"""Generate valid container name from file group name."""
file_group = file_group.lower()
# Check for any chars that aren't 'a-z', '0-9' or '-'
valid_chars = r'^[a-z0-9][-a-z0-9]*$'
# Replace any underscores or double-hyphens with single hyphen
underscores_and_hyphens = r'[_-]+'
clean_group = re.sub(underscores_and_hyphens, '-', file_group)
clean_group = clean_group.rstrip('-')
if not re.match(valid_chars, clean_group):
raise ValueError('File group name \'{}\' contains illegal characters. '
'File group names only support alphanumeric characters, '
'underscores and hyphens.'.format(file_group))
if clean_group == file_group and len(file_group) <= FileUtils.MAX_GROUP_LENGTH:
# If specified group name is clean, no need to add hash
return file_group
else:
# If we had to transform the group name, add hash of original name
hash_str = hashlib.sha1(file_group.encode()).hexdigest()
new_group = '{}-{}'.format(clean_group, hash_str)
if len(new_group) > FileUtils.MAX_GROUP_LENGTH:
return '{}-{}'.format(clean_group[0:15], hash_str)
return new_group
def get_container_name(file_group):
"""Get valid container name from file group name with prefix."""
return '{}{}'.format(FileUtils.GROUP_PREFIX, generate_container_name(file_group))
def upload_blob(source, destination, file_name, # pylint: disable=too-many-arguments
blob_service, remote_path=None, flatten=None):
"""Upload the specified file to the specified container"""
if not os.path.isfile(source):
raise ValueError('Failed to locate file {}'.format(source))
statinfo = os.stat(source)
    if statinfo.st_size > FileUtils.MAX_FILE_SIZE:
raise ValueError('The local file size {} exceeds the Azure blob size limit'.
format(statinfo.st_size))
if flatten:
# Flatten local directory structure
file_name = os.path.basename(file_name)
# Create upload container with sanitized file group name
container_name = get_container_name(destination)
blob_service.create_container(container_name)
blob_name = file_name
if remote_path:
# Add any specified virtual directories
blob_prefix = FileUtils.STRIP_PATH.sub('', remote_path)
blob_name = '{}/{}'.format(blob_prefix, FileUtils.STRIP_PATH.sub('', file_name))
blob_name = blob_name.replace('\\', '/')
    # We store the last-modified timestamp in order to prevent overwriting with
    # outdated or duplicate data. TODO: Investigate cleaner options for handling this.
    file_time = str(os.path.getmtime(source))
    metadata = None
    try:
        metadata = blob_service.get_blob_metadata(container_name, blob_name)
    except Exception:  # pylint: disable=broad-except
        # TODO: check specifically for a 'not found' error
        pass
    else:
        if metadata and metadata.get('lastmodified') == file_time:
            logger.warning('File \'%s\' already exists and is up-to-date - skipping', blob_name)
            return
logger.warning('Uploading %s to blob %s in container %s', source, blob_name, container_name)
# Upload block blob
# TODO: Investigate compression + chunking performance enhancement proposal.
blob_service.create_blob_from_path(
container_name=container_name,
blob_name=blob_name,
file_path=source,
progress_callback=lambda c, t: None,
metadata={'lastmodified': file_time},
        # We want to validate the file as we upload, and only complete the operation
        # if all the data transfers successfully
validate_content=True,
max_connections=FileUtils.PARALLEL_OPERATION_THREAD_COUNT)
class FileUtils(object):
STRIP_PATH = re.compile(r"^[/\\]+|[/\\]+$")
GROUP_PREFIX = 'fgrp-'
MAX_GROUP_LENGTH = 63 - len(GROUP_PREFIX)
MAX_FILE_SIZE = 50000 * 4 * 1024 * 1024
PARALLEL_OPERATION_THREAD_COUNT = 5
SAS_PERMISSIONS = BlobPermissions.READ # Read permission
SAS_EXPIRY_DAYS = 7 # 7 days
ROUND_DATE = 2 * 60 * 1000 # Round to nearest 2 minutes
def __init__(self, client, resource_group_name, account_name):
self.resource_file_cache = {}
self.resource_group_name = resource_group_name
self.account_name = account_name
self.resolved_storage_client = None
self.mgmt_client = client
def filter_resource_cache(self, container, prefix):
"""Return all blob refeferences in a container cache that meet a prefix requirement."""
filtered = []
for blob in self.resource_file_cache[container]:
if not prefix:
filtered.append(blob)
elif blob['filePath'].startswith(prefix):
filtered.append(blob)
return filtered
def list_container_contents(self, source, container, blob_service):
"""List blob references in container."""
        if container not in self.resource_file_cache:
self.resource_file_cache[container] = []
blobs = blob_service.list_blobs(container)
for blob in blobs:
blob_sas = self.generate_sas_token(blob, container, blob_service) \
if 'fileGroup' in source else \
construct_sas_url(blob, urlsplit(source['containerUrl']))
file_name = os.path.basename(blob.name)
file_name_only = os.path.splitext(file_name)[0]
self.resource_file_cache[container].append(
{'url' : blob_sas,
'filePath' : blob.name,
'fileName' : file_name,
'fileNameWithoutExtension' : file_name_only})
return self.filter_resource_cache(container, source.get('prefix'))
    def generate_sas_token(self, blob, container, blob_service):
        """Generate a blob URL with SAS token."""
        sas_token = blob_service.generate_blob_shared_access_signature(
            container, blob.name,
            permission=self.SAS_PERMISSIONS,
            start=datetime.datetime.utcnow(),
            expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
        return blob_service.make_blob_url(container, blob.name, sas_token=sas_token)
def get_container_list(self, source):
"""List blob references in container."""
storage_client = None
container = None
if 'fileGroup' in source:
# Input data stored in auto-storage
storage_client = self.resolve_storage_account()
container = get_container_name(source['fileGroup'])
elif 'containerUrl' in source:
            uri = urlsplit(source['containerUrl'])
if not uri.query:
raise ValueError('Invalid container url.')
storage_account_name = uri.netloc.split('.')[0]
sas_token = uri.query
storage_client = BlockBlobService(account_name=storage_account_name,
sas_token=sas_token)
            container = uri.path.split('/')[1]
else:
raise ValueError('Unknown source.')
return self.list_container_contents(source, container, storage_client)
def resolve_resource_file(self, resource_file):
"""Convert new resourceFile reference to server-supported reference"""
if 'blobSource' in resource_file:
# Support original resourceFile reference
            if 'filePath' not in resource_file:
                raise ValueError('Malformed ResourceFile: \'blobSource\' must '
                                 'also have \'filePath\' attribute')
            return dict(resource_file)
        if 'source' not in resource_file:
            raise ValueError('Malformed ResourceFile: Must have either '
                             '\'source\' or \'blobSource\'')
storage_client = self.resolve_storage_account()
container = None
blobs = []
if 'fileGroup' in resource_file['source']:
# Input data stored in auto-storage
container = get_container_name(resource_file['source']['fileGroup'])
blobs = self.list_container_contents(resource_file['source'], container, storage_client)
return convert_blobs_to_resource_files(blobs, resource_file)
        elif 'containerUrl' in resource_file['source']:
            # Input data stored in an arbitrary container
            uri = urlsplit(resource_file['source']['containerUrl'])
            container = uri.path.split('/')[1]
blobs = self.list_container_contents(resource_file['source'], container, storage_client)
return convert_blobs_to_resource_files(blobs, resource_file)
elif 'url' in resource_file['source']:
# TODO: Input data from an arbitrary HTTP GET source
raise ValueError('Not implemented')
else:
raise ValueError('Malformed ResourceFile')
def resolve_storage_account(self):
"""Resolve Auto-Storage account from supplied Batch Account"""
account = self.mgmt_client.get(self.resource_group_name, self.account_name)
if not account.auto_storage:
raise ValueError('No linked auto-storage for account {}'.format(self.account_name))
storage_account_info = account.auto_storage.storage_account_id.split('/')
storage_resource_group = storage_account_info[4]
storage_account = storage_account_info[8]
storage_client = get_mgmt_service_client(StorageManagementClient)
keys = storage_client.storage_accounts.list_keys(storage_resource_group, storage_account)
storage_key = keys.keys[0].value # pylint: disable=no-member
return CloudStorageAccount(storage_account, storage_key).create_block_blob_service()
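As a rough illustration of the file-group sanitization above, these are the mappings the unit tests later in this commit expect (a sketch, assuming it runs with this module's definitions in scope):

# Illustration only - expected values taken from the tests in this commit.
assert get_container_name('data') == 'fgrp-data'
assert get_container_name('Data') == 'fgrp-data'
assert get_container_name('data__test--') == (
    'fgrp-data-test-6640b0b7acfec6867ab146c9cf185206b5f0bdcb')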

@@ -0,0 +1,48 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
#pylint: disable=line-too-long
helps['batch'] = """
type: group
short-summary: Commands for working with Azure Batch.
"""
helps['batch job'] = """
type: group
short-summary: Commands to manage your Batch jobs.
"""
helps['batch job create'] = """
type: command
short-summary: Adds a job and associated task(s) to the specified account.
"""
helps['batch pool'] = """
type: group
short-summary: Commands to manage your Batch pools.
"""
helps['batch pool create'] = """
type: command
short-summary: Create a Batch pool.
"""
helps['batch file'] = """
type: group
short-summary: Commands to manage your Batch input files.
"""
helps['batch file upload'] = """
type: command
short-summary: Upload a specified file or directory of files to the specified storage path.
"""
helps['batch file download'] = """
type: command
    short-summary: Download a specified file or directory of files from the specified storage path.
"""

@@ -0,0 +1,75 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.exceptions import ValidationError, ClientRequestError
from azure.batch.models import BatchErrorException
from azure.cli.core._util import CLIError
def _handle_batch_exception(action):
try:
return action()
except BatchErrorException as ex:
try:
message = ex.error.message.value
if ex.error.values:
for detail in ex.error.values:
message += "\n{}: {}".format(detail.key, detail.value)
raise CLIError(message)
except AttributeError:
raise CLIError(ex)
except (ValidationError, ClientRequestError) as ex:
raise CLIError(ex)
def deploy_tasks(client, job_id, tasks):
MAX_TASKS_COUNT_IN_BATCH = 100
def add_task():
start = 0
while start < len(tasks):
end = min(start + MAX_TASKS_COUNT_IN_BATCH, len(tasks))
client.task.add_collection(job_id, tasks[start:end])
start = end
_handle_batch_exception(add_task)
def get_task_counts(client, job_id):
task_counts = {
'active' : 0,
'running' : 0,
'completed' : 0
}
def action():
result = client.task.list(job_id, select='id, state')
for task in result:
if task.state in ['active', 'running', 'completed']:
task_counts[task.state] += 1
else:
raise ValueError('Invalid task state')
return task_counts
return _handle_batch_exception(action)
def get_target_pool(client, job):
def action():
pool_result = client.pool.get(job['poolInfo']['poolId'])
return client._serialize(pool_result, 'CloudPool') # pylint: disable=protected-access
if not job.get('poolInfo'):
raise ValueError('Missing required poolInfo.')
pool = None
if 'poolId' in job['poolInfo']:
pool = _handle_batch_exception(action)
elif 'autoPoolSpecification' in job['poolInfo'] \
and job['poolInfo']['autoPoolSpecification'].get('pool'):
pool = job['poolInfo']['autoPoolSpecification']['pool']
else:
raise ValueError('Missing required poolId or autoPoolSpecification.pool.')
return pool
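deploy_tasks submits the task collection in slices of at most 100 per add_collection call; a self-contained sketch of that slicing, independent of the Batch client:

# Standalone sketch of the 100-task chunking used by deploy_tasks.
tasks = list(range(250))
batches = [tasks[i:i + 100] for i in range(0, len(tasks), 100)]
assert [len(b) for b in batches] == [100, 100, 50]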

@@ -0,0 +1,55 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from azure.cli.core.commands import \
(register_cli_argument, CliArgumentType, register_extra_cli_argument)
from azure.cli.core.commands.parameters import \
(resource_group_name_type,
get_resource_name_completion_list, file_type)
from azure.cli.command_modules.batch_extensions._validators import \
(application_enabled, validate_client_parameters, metadata_item_format,
certificate_reference_format, validate_json_file, load_node_agent_skus)
# pylint: disable=line-too-long
# ARGUMENT DEFINITIONS
batch_name_type = CliArgumentType(help='Name of the Batch account.', options_list=('--account-name',), completer=get_resource_name_completion_list('Microsoft.Batch/batchAccounts'), id_part=None)
# PARAMETER REGISTRATIONS
register_cli_argument('batch pool create', 'json_file', type=file_type, help='The file containing the pool to create in JSON format. If this parameter is specified, all other parameters are ignored.', validator=validate_json_file, completer=FilesCompleter())
register_cli_argument('batch pool create', 'pool_id', help='The ID of the pool to be created.')
register_cli_argument('batch pool create', 'application_package_references', nargs='+') # type=application_package_reference_format)
register_cli_argument('batch pool create', 'certificate_references', nargs='+', type=certificate_reference_format)
register_cli_argument('batch pool create', 'metadata', nargs='+', type=metadata_item_format)
register_cli_argument('batch pool create', 'image', completer=load_node_agent_skus, arg_group="Pool: Virtual Machine Configuration",
help="OS image URN in 'publisher:offer:sku[:version]' format. Version is optional and if omitted latest will be used.\n\tValues from 'az batch pool node-agent-skus list'.\n\tExample: 'MicrosoftWindowsServer:WindowsServer:2012-R2-Datacenter:latest'")
register_cli_argument('batch file upload', 'resource_group_name', resource_group_name_type, completer=None, validator=application_enabled)
register_cli_argument('batch file upload', 'account_name', batch_name_type, options_list=('--name', '-n'))
register_cli_argument('batch file upload', 'local_path', type=file_type, help='Path to a local file or directory to be uploaded - can include wildcard patterns.')
register_cli_argument('batch file upload', 'file_group', help='Name of a file group under which the files will be stored.')
register_cli_argument('batch file upload', 'remote_path', help='Group subdirectory under which files will be uploaded.')
register_cli_argument('batch file upload', 'flatten', action='store_true', help='If set, will not retain local directory structure in storage.')
register_cli_argument('batch file download', 'resource_group_name', resource_group_name_type, completer=None, validator=application_enabled)
register_cli_argument('batch file download', 'account_name', batch_name_type, options_list=('--name', '-n'))
register_cli_argument('batch file download', 'local_path', type=file_type, help='Path to a local file or directory to which the files will be downloaded - can include wildcard patterns.')
register_cli_argument('batch file download', 'file_group', help='Name of a file group from which the files will be downloaded.')
register_cli_argument('batch file download', 'remote_path', help='Group subdirectory from which files will be downloaded.')
register_cli_argument('batch file download', 'flatten', action='store_true', help='If set, will not retain the remote directory structure when downloading.')
for item in ['batch pool create', 'batch job create']:
register_extra_cli_argument(item, 'account_name', arg_group='Batch Account',
validator=validate_client_parameters,
help='The Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT')
register_extra_cli_argument(item, 'account_key', arg_group='Batch Account',
help='The Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY')
register_extra_cli_argument(item, 'account_endpoint', arg_group='Batch Account',
help='Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT')

@@ -0,0 +1,21 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
class PoolOperatingSystemFlavor(Enum):
WINDOWS = 'windows'
LINUX = 'linux'
def get_pool_target_os_type(pool):
try:
image_publisher = pool['virtualMachineConfiguration']['imageReference']['publisher']
except KeyError:
image_publisher = None
return PoolOperatingSystemFlavor.WINDOWS \
if image_publisher and image_publisher.find('MicrosoftWindowsServer') >= 0 \
else PoolOperatingSystemFlavor.LINUX
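A quick sketch of get_pool_target_os_type against hypothetical pool bodies:

# Hypothetical pool bodies: a Windows publisher maps to WINDOWS;
# anything else (including a missing imageReference) falls back to LINUX.
win_pool = {'virtualMachineConfiguration': {
    'imageReference': {'publisher': 'MicrosoftWindowsServer'}}}
assert get_pool_target_os_type(win_pool) == PoolOperatingSystemFlavor.WINDOWS
assert get_pool_target_os_type({}) == PoolOperatingSystemFlavor.LINUX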

@@ -0,0 +1,38 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import datetime
AZURE_FILE_VOLUME_TYPE = 'azurefile'
def _get_autostorage_credentials_label():
"""Gets the label of the AutoStorage account to use in the credentials.json file.
:returns: The label of the AutoStorage account to use in the credentials.json file.
"""
return 'autostorage_account'
def _generate_datavolume_label(task_id, datavolume_index):
"""Generates a label for a task data volume.
:returns: A label for a task data volume based on the task id
and the index of the data volume.
"""
return str(task_id) + '_' + str(datavolume_index)
def _generate_temp_config_dir_name():
"""Generates a name for a temporary directory to hold the Batch Shipyard config files.
    This function uses the current time to create a directory name. If the user performs
    multiple invocations of the script less than .001 seconds apart, there will be a
    conflict.
:returns: A temporary directory name based on the current time.
"""
now = datetime.datetime.now()
formatted_datestring = now.isoformat()
# ISO format is YYYY-MM-DDTHH:mm:ss.sssZ
# Replace all ':' chars with '.' chars to get a cleaner dir name.
formatted_datestring = formatted_datestring.replace(':', '.')
return 'BatchShipyardConfigs_' + formatted_datestring
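A short sketch of the label helpers above, assuming it runs with this module's definitions in scope:

# Illustration of the helper outputs.
assert _generate_datavolume_label('mytask', 0) == 'mytask_0'
# _generate_temp_config_dir_name() yields something like
# 'BatchShipyardConfigs_2017-02-24T13.43.17.000000' (timestamp-dependent).
assert _generate_temp_config_dir_name().startswith('BatchShipyardConfigs_')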

The diff for this file is not shown because of its size.

@@ -0,0 +1,263 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import json
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit # pylint: disable=import-error
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from azure.mgmt.batch import BatchManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.cli.core._config import az_config
from azure.cli.core.commands.client_factory import get_mgmt_service_client
# COMPLETER
def load_node_agent_skus(prefix, **kwargs): # pylint: disable=unused-argument
from msrest.exceptions import ClientRequestError
from azure.batch.models import BatchErrorException
from azure.cli.command_modules.batch._client_factory import account_client_factory
all_images = []
client_creds = {}
client_creds['account_name'] = az_config.get('batch', 'account', None)
client_creds['account_key'] = az_config.get('batch', 'access_key', None)
client_creds['account_endpoint'] = az_config.get('batch', 'endpoint', None)
try:
client = account_client_factory(client_creds)
skus = client.list_node_agent_skus()
for sku in skus:
for image in sku['verifiedImageReferences']:
all_images.append("{}:{}:{}:{}".format(
image['publisher'],
image['offer'],
image['sku'],
image['version']))
return all_images
except (ClientRequestError, BatchErrorException):
return []
# TYPES VALIDATORS
def datetime_format(value):
"""Validate the correct format of a datetime string and deserialize."""
try:
datetime_obj = Deserializer.deserialize_iso(value)
except DeserializationError:
message = "Argument {} is not a valid ISO-8601 datetime format"
raise ValueError(message.format(value))
else:
return datetime_obj
def duration_format(value):
"""Validate the correct format of a timespan string and deserilize."""
try:
duration_obj = Deserializer.deserialize_duration(value)
except DeserializationError:
message = "Argument {} is not in a valid ISO-8601 duration format"
raise ValueError(message.format(value))
else:
return duration_obj
def metadata_item_format(value):
"""Validate listed metadata arguments"""
try:
data_name, data_value = value.split('=')
except ValueError:
message = ("Incorrectly formatted metadata. "
"Argmuent values should be in the format a=b c=d")
raise ValueError(message)
else:
return {'name': data_name, 'value': data_value}
def environment_setting_format(value):
"""Validate listed enviroment settings arguments"""
try:
env_name, env_value = value.split('=')
except ValueError:
message = ("Incorrectly formatted enviroment settings. "
"Argmuent values should be in the format a=b c=d")
raise ValueError(message)
else:
return {'name': env_name, 'value': env_value}
def application_package_reference_format(value):
"""Validate listed application package reference arguments"""
app_reference = value.split('#', 1)
package = {'application_id': app_reference[0]}
try:
package['version'] = app_reference[1]
except IndexError: # No specified version - ignore
pass
return package
def certificate_reference_format(value):
"""Validate listed certificate reference arguments"""
cert = {'thumbprint': value, 'thumbprint_algorithm': 'sha1'}
return cert
# COMMAND NAMESPACE VALIDATORS
def validate_required_parameter(ns, parser):
"""Validates required parameters in Batch complex objects"""
if not parser.done:
parser.parse(ns)
def storage_account_id(namespace):
"""Validate storage account name"""
if namespace.storage_account_name:
if not namespace.storage_account_id:
storage_client = get_mgmt_service_client(StorageManagementClient)
acc = storage_client.storage_accounts.get_properties(namespace.resource_group_name,
namespace.storage_account_name)
if not acc:
raise ValueError("Batch account '{}' not found in the resource group '{}'.". \
format(namespace.storage_account_name, namespace.resource_group_name))
namespace.storage_account_id = acc.id #pylint: disable=no-member
del namespace.storage_account_name
def application_enabled(namespace):
"""Validates account has auto-storage enabled"""
client = get_mgmt_service_client(BatchManagementClient)
acc = client.batch_account.get(namespace.resource_group_name, namespace.account_name)
if not acc:
raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
if not acc.auto_storage or not acc.auto_storage.storage_account_id: #pylint: disable=no-member
raise ValueError("Batch account '{}' needs auto-storage enabled.".
format(namespace.account_name))
def validate_pool_resize_parameters(namespace):
"""Validate pool resize parameters correct"""
if not namespace.abort:
if not namespace.target_dedicated:
raise ValueError("The target-dedicated parameter is required to resize the pool.")
def validate_json_file(namespace):
"""Validate the give json file existing"""
if namespace.json_file:
try:
with open(namespace.json_file) as file_handle:
json.load(file_handle)
except EnvironmentError:
raise ValueError("Cannot access JSON request file: " + namespace.json_file)
except ValueError as err:
raise ValueError("Invalid JSON file: {}".format(err))
def validate_cert_file(namespace):
"""Validate the give cert file existing"""
try:
with open(namespace.cert_file, "rb"):
pass
except EnvironmentError:
raise ValueError("Cannot access certificate file: " + namespace.cert_file)
def validate_options(namespace):
"""Validate any flattened request header option arguments."""
try:
start = namespace.start_range
end = namespace.end_range
except AttributeError:
return
else:
namespace.ocp_range = None
del namespace.start_range
del namespace.end_range
if start or end:
start = start if start else 0
end = end if end else ""
namespace.ocp_range = "bytes={}-{}".format(start, end)
def validate_file_destination(namespace):
"""Validate the destination path for a file download."""
try:
path = namespace.destination
except AttributeError:
return
else:
# TODO: Need to confirm this logic...
file_path = path
file_dir = os.path.dirname(path)
if os.path.isdir(path):
file_name = os.path.basename(namespace.file_name)
file_path = os.path.join(path, file_name)
elif not os.path.isdir(file_dir):
try:
os.mkdir(file_dir)
except EnvironmentError as exp:
message = "Directory {} does not exist, and cannot be created: {}"
raise ValueError(message.format(file_dir, exp))
if os.path.isfile(file_path):
raise ValueError("File {} already exists.".format(file_path))
namespace.destination = file_path
def validate_client_parameters(namespace):
"""Retrieves Batch connection parameters from environment variables"""
# simply try to retrieve the remaining variables from environment variables
if not namespace.account_name:
namespace.account_name = az_config.get('batch', 'account', None)
if not namespace.account_key:
namespace.account_key = az_config.get('batch', 'access_key', None)
if not namespace.account_endpoint:
namespace.account_endpoint = az_config.get('batch', 'endpoint', None)
# if account name is specified but no key, attempt to query
if namespace.account_name and namespace.account_endpoint and not namespace.account_key:
endpoint = urlsplit(namespace.account_endpoint)
host = endpoint.netloc
client = get_mgmt_service_client(BatchManagementClient)
acc = next((x for x in client.batch_account.list() \
if x.name == namespace.account_name and x.account_endpoint == host), None)
if acc:
from azure.cli.core.commands.arm import parse_resource_id
rg = parse_resource_id(acc.id)['resource_group']
namespace.account_key = \
client.batch_account.get_keys(rg, namespace.account_name).primary #pylint: disable=no-member
else:
raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
else:
        if not namespace.account_name:
            raise ValueError("A Batch account name must be specified, either on the "
                             "command line or via environment variable.")
        if not namespace.account_endpoint:
            raise ValueError("A Batch endpoint must be specified, either on the "
                             "command line or via environment variable.")
# CUSTOM REQUEST VALIDATORS
def validate_pool_settings(ns, parser):
"""Custom parsing to enfore that either PaaS or IaaS instances are configured
in the add pool request body.
"""
if not ns.json_file:
groups = ['pool.cloud_service_configuration', 'pool.virtual_machine_configuration']
parser.parse_mutually_exclusive(ns, True, groups)
paas_sizes = ['small', 'medium', 'large', 'extralarge']
if ns.vm_size and ns.vm_size.lower() in paas_sizes and not ns.os_family:
message = ("The selected VM size in incompatible with Virtual Machine Configuration. "
"Please swap for the IaaS equivalent: Standard_A1 (small), Standard_A2 "
"(medium), Standard_A3 (large), or Standard_A4 (extra large).")
raise ValueError(message)
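Hedged examples of the simple value-format validators above (again assuming this module's definitions are in scope; inputs are hypothetical):

# Illustration of the value parsers.
assert metadata_item_format('a=b') == {'name': 'a', 'value': 'b'}
assert application_package_reference_format('myapp#1.0') == (
    {'application_id': 'myapp', 'version': '1.0'})
assert certificate_reference_format('0123abcd') == (
    {'thumbprint': '0123abcd', 'thumbprint_algorithm': 'sha1'})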

@@ -0,0 +1,22 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import cli_command
from azure.cli.command_modules.batch_extensions._client_factory import (
account_mgmt_client_factory, batch_data_service_factory)
#from ._validators import validate_pool_settings
custom_path = 'azure.cli.command_modules.batch_extensions.custom#{}'
# pylint: disable=line-too-long
# NCJ Commands
cli_command(__name__, 'batch file upload', custom_path.format('upload_file'), account_mgmt_client_factory)
cli_command(__name__, 'batch file download', custom_path.format('download_file'), account_mgmt_client_factory)
cli_command(__name__, 'batch pool create', custom_path.format('create_pool'), batch_data_service_factory)
cli_command(__name__, 'batch job create', custom_path.format('create_job'), batch_data_service_factory)

@@ -0,0 +1,275 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import os
from azure.batch.models import (
PoolAddParameter, CloudServiceConfiguration, VirtualMachineConfiguration,
ImageReference, PoolInformation, JobAddParameter, JobManagerTask,
JobConstraints, StartTask)
from azure.cli.command_modules.batch_extensions._file_utils import (
FileUtils, resolve_file_paths, upload_blob)
import azure.cli.command_modules.batch_extensions._template_utils as template_utils
import azure.cli.command_modules.batch_extensions._pool_utils as pool_utils
import azure.cli.command_modules.batch_extensions._job_utils as job_utils
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
# NCJ custom commands
def create_pool(client, template=None, parameters=None, json_file=None, # pylint:disable=too-many-arguments, too-many-locals
id=None, vm_size=None, target_dedicated=None, auto_scale_formula=None, # pylint: disable=redefined-builtin
enable_inter_node_communication=False, os_family=None, image=None,
node_agent_sku_id=None, resize_timeout=None, start_task_command_line=None,
start_task_resource_files=None, start_task_run_elevated=False,
start_task_wait_for_success=False, certificate_references=None,
application_package_references=None, metadata=None):
# pylint: disable=too-many-branches, too-many-statements
if template or json_file:
if template:
logger.warning('You are using an experimental feature {Pool Template}.')
expanded_pool_object = template_utils.expand_template(template, parameters)
            if 'pool' not in expanded_pool_object:
                raise ValueError('Missing pool element in the template.')
            if 'properties' not in expanded_pool_object['pool']:
                raise ValueError('Missing pool properties element in the template.')
            # Build up the JSON object to hand to the Batch service.
json_obj = expanded_pool_object['pool']['properties']
else:
with open(json_file) as f:
json_obj = json.load(f)
# validate the json file
pool = client._deserialize('PoolAddParameter', json_obj) # pylint:disable=protected-access
if pool is None:
raise ValueError("JSON file '{}' is not in correct format.".format(json_file))
        # Handle package management
if 'packageReferences' in json_obj:
logger.warning('You are using an experimental feature {Package Management}.')
pool_os_flavor = pool_utils.get_pool_target_os_type(json_obj)
cmds = [template_utils.process_pool_package_references(json_obj)]
# Update the start up command
json_obj['startTask'] = template_utils.construct_setup_task(
json_obj.get('startTask'), cmds, pool_os_flavor)
# Handle any special post-processing steps.
# - Resource Files
# - etc
json_obj = template_utils.post_processing(json_obj)
# Batch Shipyard integration
if 'clientExtensions' in json_obj and 'dockerOptions' in json_obj['clientExtensions']:
logger.warning('You are using an experimental feature {Batch Shipyard}.')
# batchShipyardUtils.createPool(json_obj, options, cli)
# return
        # All NCJ work is done on the raw JSON, so convert back to the SDK type
pool = client._deserialize('PoolAddParameter', json_obj) # pylint: disable=protected-access
else:
if not id:
raise ValueError('Need either template, json_file, or id')
pool = PoolAddParameter(id, vm_size=vm_size)
if target_dedicated is not None:
pool.target_dedicated = target_dedicated
pool.enable_auto_scale = False
else:
pool.auto_scale_formula = auto_scale_formula
pool.enable_auto_scale = True
pool.enable_inter_node_communication = enable_inter_node_communication
if os_family:
pool.cloud_service_configuration = CloudServiceConfiguration(os_family)
else:
if image:
version = 'latest'
try:
publisher, offer, sku = image.split(':', 2)
except ValueError:
message = ("Incorrect format for VM image URN. Should be in the format: \n"
"'publisher:offer:sku[:version]'")
raise ValueError(message)
try:
sku, version = sku.split(':', 1)
except ValueError:
pass
pool.virtual_machine_configuration = VirtualMachineConfiguration(
ImageReference(publisher, offer, sku, version),
node_agent_sku_id)
if start_task_command_line:
pool.start_task = StartTask(start_task_command_line)
pool.start_task.run_elevated = start_task_run_elevated
pool.start_task.wait_for_success = start_task_wait_for_success
pool.start_task.resource_files = start_task_resource_files
if resize_timeout:
pool.resize_timeout = resize_timeout
if metadata:
pool.metadata = metadata
if certificate_references:
pool.certificate_references = certificate_references
if application_package_references:
pool.application_package_references = application_package_references
# TODO: add _handle_exception
client.pool.add(pool)
return client.pool.get(pool.id)
create_pool.__doc__ = PoolAddParameter.__doc__
def create_job(client, template=None, parameters=None, json_file=None, id=None, #pylint:disable=too-many-arguments, redefined-builtin, too-many-locals
pool_id=None, priority=None, uses_task_dependencies=False, metadata=None,
job_max_wall_clock_time=None, job_max_task_retry_count=None,
job_manager_task_command_line=None, job_manager_task_environment_settings=None,
job_manager_task_id=None, job_manager_task_resource_files=None,
job_manager_task_run_elevated=False):
# pylint: disable=too-many-branches, too-many-statements
if template or json_file:
working_folder = '.'
if template:
logger.warning('You are using an experimental feature {Job Template}.')
expanded_job_object = template_utils.expand_template(template, parameters)
            if 'job' not in expanded_job_object:
                raise ValueError('Missing job element in the template.')
            if 'properties' not in expanded_job_object['job']:
                raise ValueError('Missing job properties element in the template.')
            # Build up the JSON object to hand to the Batch service.
json_obj = expanded_job_object['job']['properties']
working_folder = os.path.dirname(template)
else:
with open(json_file) as f:
json_obj = json.load(f)
# validate the json file
job = client._deserialize('JobAddParameter', json_obj) # pylint: disable=protected-access
if job is None:
raise ValueError("JSON file '{}' is not in correct format.".format(json_file))
working_folder = os.path.dirname(json_file)
if 'applicationTemplateInfo' in json_obj:
logger.warning('You are using an experimental feature {Application Templates}.')
json_obj = template_utils.expand_application_template(json_obj, working_folder)
auto_complete = False
task_collection = []
if 'taskFactory' in json_obj:
logger.warning('You are using an experimental feature {Task Factory}.')
task_collection = template_utils.expand_task_factory(json_obj)
# If job has a task factory and terminate job on all tasks complete is set, the job will
# already be terminated when we add the tasks, so we need to set to noAction, then patch
# the job once the tasks have been submitted.
if 'onAllTasksComplete' in json_obj and json_obj['onAllTasksComplete'] != 'noaction':
auto_complete = json_obj['onAllTasksComplete']
json_obj['onAllTasksComplete'] = 'noaction'
should_get_pool = template_utils.should_get_pool(task_collection)
pool_os_flavor = None
if should_get_pool:
pool = job_utils.get_target_pool(client, json_obj)
pool_os_flavor = pool_utils.get_pool_target_os_type(pool)
# Handle package management on autopool
if 'poolInfo' in json_obj and 'autoPoolSpecification' in json_obj['poolInfo'] \
and 'pool' in json_obj['poolInfo']['autoPoolSpecification'] \
and 'packageReferences' in json_obj['poolInfo']['autoPoolSpecification']['pool']:
logger.warning('You are using an experimental feature {Package Management}.')
pool = json_obj['poolInfo']['autoPoolSpecification']['pool']
cmds = [template_utils.process_pool_package_references(pool)]
pool_os_flavor = pool_utils.get_pool_target_os_type(pool)
pool['startTask'] = template_utils.construct_setup_task(
pool.get('startTask'), cmds, pool_os_flavor)
commands = []
# Handle package management on tasks
commands.append(template_utils.process_task_package_references(
task_collection, pool_os_flavor))
# Handle any special post-processing steps.
# - Application templates
# - Resource Files
# - Output Files
# - etc
json_obj = template_utils.post_processing(json_obj)
if task_collection:
task_collection = template_utils.post_processing(task_collection)
commands.append(template_utils.process_job_for_output_files(
json_obj, task_collection, pool_os_flavor))
json_obj['jobPreparationTask'] = template_utils.construct_setup_task(
json_obj.get('jobPreparationTask'), commands, pool_os_flavor)
# Batch Shipyard integration
if any(t.get('clientExtensions', {}).get('dockerOptions') for t in task_collection):
logger.warning('You are using an experimental feature'
' {Job and task creation with Batch Shipyard}.')
# batchShipyardUtils.createJobAndAddTasks(
# parsedJson, taskCollection, pool, options, cli, _);
# return
        # All NCJ work is done on the raw JSON, so convert back to the SDK type
job = client._deserialize('JobAddParameter', json_obj) #pylint:disable=W0212
else:
if not id:
raise ValueError('Need either template, json_file, or id')
pool = PoolInformation(pool_id=pool_id)
job = JobAddParameter(id, pool, priority=priority)
job.uses_task_dependencies = uses_task_dependencies
if job_max_wall_clock_time is not None or job_max_task_retry_count is not None:
constraints = JobConstraints(max_wall_clock_time=job_max_wall_clock_time,
max_task_retry_count=job_max_task_retry_count)
job.constraints = constraints
if metadata:
job.metadata = metadata
if job_manager_task_command_line and job_manager_task_id:
job_manager_task = JobManagerTask(job_manager_task_id,
job_manager_task_command_line,
resource_files=job_manager_task_resource_files,
run_elevated=job_manager_task_run_elevated,
environment_settings=\
job_manager_task_environment_settings)
job.job_manager_task = job_manager_task
client.job.add(job)
if task_collection:
job_utils.deploy_tasks(client, job.id, task_collection)
if auto_complete:
# If the option to terminate the job was set, we need to reapply it with a patch
# now that the tasks have been added.
client.job.patch(job.id, {'onAllTasksComplete': auto_complete})
    return client.job.get(job.id)
create_job.__doc__ = JobAddParameter.__doc__ + "\n" + JobConstraints.__doc__
def upload_file(client, resource_group_name, account_name, # pylint: disable=too-many-arguments
local_path, file_group, remote_path=None, flatten=None):
"""Upload local file or directory of files to storage"""
file_utils = FileUtils(client, resource_group_name, account_name)
blob_client = file_utils.resolve_storage_account()
path, files = resolve_file_paths(local_path)
if len(files) > 0:
for f in files:
file_name = os.path.relpath(f, path)
upload_blob(f, file_group, file_name, blob_client,
remote_path=remote_path, flatten=flatten)
else:
raise ValueError('No files or directories found matching local path {}'.format(local_path))
def download_file(client, resource_group_name, account_name, # pylint: disable=too-many-arguments, unused-argument
local_path, file_group, remote_path=None, flatten=None): # pylint: disable=unused-argument
pass
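The image URN handling in create_pool splits 'publisher:offer:sku[:version]' and defaults the version to 'latest'; a standalone sketch of that parsing (the helper name here is hypothetical):

# Hypothetical standalone version of the URN parsing in create_pool.
def _parse_image_urn(image):
    version = 'latest'
    publisher, offer, sku = image.split(':', 2)
    try:
        sku, version = sku.split(':', 1)
    except ValueError:
        pass  # no explicit version - keep 'latest'
    return publisher, offer, sku, version

assert _parse_image_urn('Canonical:UbuntuServer:16.04.0-LTS') == (
    'Canonical', 'UbuntuServer', '16.04.0-LTS', 'latest')
assert _parse_image_urn('a:b:c:1.0') == ('a', 'b', 'c', '1.0')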

@@ -0,0 +1,4 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

@@ -0,0 +1,41 @@
{
"templateMetadata": {
"description": "A test application template that makes use of multiple parameters after properly declaring them."
},
"jobManagerTask": {
"id":"mytask1",
"commandLine":"myprogram.exe",
"resourceFiles": [ {
"blobSource":"http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d",
"filePath":"myprogram.exe"
},
{
"blobSource":"http://mystorage1.blob.core.windows.net/scripts/test.txt?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d",
"filePath":"[parameters('blobName')]"
} ],
"environmentSettings": [ {
"name":"myvariable",
"value":"myvalue"
} ],
"constraints": {
"maxWallClockTime":"PT1H",
"maxTaskRetryCount":0,
"retentionTime":"PT1H"
},
"killJobOnCompletion":false,
"runElevated":false,
"runExclusive":true
},
"metadata": [ {
"name":"myproperty",
"value":"[parameters('keyValue')]"
} ],
"parameters": {
"blobName" : {
"type": "string"
},
"keyValue" : {
"type": "string"
}
}
}

@@ -0,0 +1,10 @@
{
"templateInfo": {
"description": "A test application template that specifies the prohibited property 'applicationTemplate'."
},
"applicationTemplateInfo": {
"filePath" : "sample\\path"
}
}

@@ -0,0 +1,8 @@
{
"templateInfo": {
"description": "A test application template that specifies prohibited property 'id'."
},
"id" : "jobid"
}

@@ -0,0 +1,10 @@
{
"templateInfo": {
"description": "A test application template that specifies prohibited property 'poolInfo'."
},
"poolInfo": {
"poolId" : "swimming"
}
}

@@ -0,0 +1,9 @@
{
"templateInfo": {
"description": "A test application template that specifies the prohibited property 'priority'."
},
"displayName": "Static Application Template",
"priority": 100
}

@@ -0,0 +1,33 @@
{
"templateMetadata": {
"description": "A test application template that has no parameters and has exactly the same result every time."
},
"jobManagerTask": {
"id": "jobManager",
"displayName": "jobManagerDisplay",
"commandLine": "cmd /c dir /s",
"resourceFiles": [
{
"blobSource": "https://testacct.blob.core.windows.net/",
"filePath": "filePath"
}
],
"environmentSettings": [
{
"name": "name1",
"value": "value1"
},
{
"name": "name2",
"value": "value2"
}
],
"constraints": {
"maxWallClockTime": "PT1H"
},
"killJobOnCompletion": false,
"runElevated": false
}
}

@@ -0,0 +1,9 @@
{
"templateInfo": {
"description": "A test application template that specifies the unsupported properties'fluxCapacitorModel' and 'vehicleMarque'."
},
"fluxCapacitorModel": "DocBrown55",
"vehicleMarque": "deLorean"
}

@@ -0,0 +1,41 @@
{
"templateMetadata": {
"description": "A test application template that declares a property with no specified type."
},
"jobManagerTask": {
"id":"mytask1",
"commandLine":"myprogram.exe",
"resourceFiles": [ {
"blobSource":"http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d",
"filePath":"myprogram.exe"
},
{
"blobSource":"http://mystorage1.blob.core.windows.net/scripts/test.txt?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d",
"filePath":"[parameters('blobName')]"
} ],
"environmentSettings": [ {
"name":"myvariable",
"value":"myvalue"
} ],
"constraints": {
"maxWallClockTime":"PT1H",
"maxTaskRetryCount":0,
"retentionTime":"PT1H"
},
"killJobOnCompletion":false,
"runElevated":false,
"runExclusive":true
},
"metadata": [ {
"name":"myproperty",
"value":"[parameters('keyValue')]"
} ],
"parameters": {
"blobName" : {
"defaultValue": "name"
},
"keyValue" : {
"type": "string"
}
}
}

@@ -0,0 +1,17 @@
{
"jobId": {
"value": "helloworld"
},
"poolId": {
"value": "xplatTestPool"
},
"outputFileStorageUrl": {
"value": "<blob url with sas>"
},
"taskStart": {
"value": 1
},
"taskEnd": {
"value": 3
}
}

@@ -0,0 +1,104 @@
{
"parameters": {
"inputFileGroup": {
"type": "string",
"defaultValue": "convert_data",
"metadata": {
"description": "The auto-storage group where the input data is stored"
}
},
"outputFileStorageUrl": {
"type": "string",
"metadata": {
"description": "The SAS URL for a container where outputs will be stored"
}
},
"inputType": {
"type": "string",
"defaultValue": "wav",
"metadata": {
"description": "The extension of the input data"
}
},
"poolId": {
"type": "string",
"defaultValue": "ffmpeg-pool",
"metadata": {
"description": "The id of Azure Batch pool which runs the job"
}
},
"jobId": {
"type": "string",
"metadata": {
"description": "The id of Azure Batch job"
}
},
"taskStart": {
"type": "int",
"metadata": {
"description": "The sweep start parameter"
}
},
"taskEnd": {
"type": "int",
"metadata": {
"description": "The sweep end parameter"
}
}
},
"job": {
"type": "Microsoft.Batch/batchAccounts/jobs",
"apiVersion": "2016-12-01",
"properties": {
"id": "[parameters('jobId')]",
"constraints": {
"maxWallClockTime": "PT5H",
"maxTaskRetryCount": 1
},
"poolInfo": {
"poolId": "[parameters('poolId')]"
},
"taskFactory": {
"type": "parametricSweep",
"parameterSets": [
{
"start": "[parameters('taskStart')]",
"end": "[parameters('taskEnd')]",
"step": 1
}
],
"repeatTask": {
"commandLine": "ffmpeg -y -i sample{0}.[parameters('inputType')] -acodec libmp3lame output.mp3",
"resourceFiles": [
{
"source": {
"fileGroup": "[parameters('inputFileGroup')]",
"prefix": "sample{0}.[parameters('inputType')]"
}
}
],
"outputFiles": [
{
"filePattern": "output.mp3",
"destination": {
"container": {
"path": "audio{0}.mp3",
"containerSas": "[parameters('outputFileStorageUrl')]"
}
},
"uploadDetails": {
"taskStatus": "TaskSuccess"
}
}
],
"packageReferences": [
{
"type": "aptPackage",
"id": "ffmpeg"
}
]
}
}
}
}
}

@@ -0,0 +1,40 @@
{
"parameters": {
"jobId": {
"type": "string",
"defaultValue": "ffmpegpool",
"metadata": {
"description": "The name of Azure Batch pool which runs the job"
}
},
"poolId": {
"type": "string",
"metadata": {
"description": "The name of Azure Batch job"
}
}
},
"job": {
"type": "Microsoft.Batch/batchAccounts/jobs",
"apiVersion": "2016-12-01",
"properties": {
"id": "[parameters('jobId')]",
"poolInfo": {
"poolId": "[parameters('poolId')]"
},
"taskFactory": {
"type": "taskCollection",
"tasks": [
{
"id" : "mytask1",
"commandLine": "cmd /c echo hello1"
},
{
"id" : "mytask2",
"commandLine": "cmd /c echo hello2"
}
]
}
}
}
}

@@ -0,0 +1,5 @@
{
"poolName": {
"value": "testpool1"
}
}

@@ -0,0 +1,57 @@
{
"parameters": {
"vmSize": {
"type": "string",
"metadata": {
"description": "The size of the virtual machines that runs the application"
},
"defaultValue": "STANDARD_D1",
"allowedValues": [
"STANDARD_A1",
"STANDARD_A2",
"STANDARD_A3",
"STANDARD_A4",
"STANDARD_D1",
"STANDARD_D2",
"STANDARD_D3",
"STANDARD_D4"
]
},
"vmCount": {
"type": "int",
"defaultValue": 3,
"metadata": {
"description": "The number of the virtual machines"
}
},
"poolName": {
"type": "string",
"defaultValue": "ffmpegpool",
"metadata": {
"description": "The name of Azure Batch pool"
}
}
},
"variables": {
"osType": {
"publisher": "Canonical",
"offer": "UbuntuServer",
"sku": "15.10",
"version": "latest"
}
},
"pool": {
"type": "Microsoft.Batch/batchAccounts/pools",
"apiVersion": "2016-12-01",
"properties": {
"id": "[parameters('poolName')]",
"virtualMachineConfiguration": {
"imageReference": "[variables('osType')]",
"nodeAgentSKUId": "batch.node.debian 8"
},
"vmSize": "[parameters('vmSize')]",
"vmCount": "[parameters('vmCount')]",
"enableAutoScale": false
}
}
}

@@ -0,0 +1,27 @@
{
"job": {
"type": "Microsoft.Batch/batchAccounts/jobs",
"apiVersion": "2016-12-01",
"properties": {
"id": "ncj-shipyard-test-job01",
"poolInfo": {
"poolId": "ncj-shipyard-test-pool01"
},
"taskFactory": {
"type": "taskCollection",
"tasks": [
{
"id": "task01",
"commandLine": "/opt/run_mnist.sh",
"clientExtensions": {
"dockerOptions": {
"image": "alfpark/caffe:cpu",
"remove_container_after_exit": true
}
}
}
]
}
}
}
}

@@ -0,0 +1,24 @@
{
"pool": {
"type": "Microsoft.Batch/batchAccounts/pools",
"apiVersion": "2016-12-01",
"properties": {
"id": "ncj-shipyard-test-pool01",
"virtualMachineConfiguration": {
"imageReference": {
"publisher": "Canonical",
"offer": "UbuntuServer",
"sku": "16.04.0-LTS"
}
},
"vmSize": "STANDARD_D1_V2",
"targetDedicated": 1,
"maxTasksPerNode": 1,
"clientExtensions": {
"dockerOptions": {
"image": "alfpark/caffe:cpu"
}
}
}
}
}

@@ -0,0 +1,183 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import os
import unittest
import mock
from azure.cli.command_modules.batch_extensions import _pool_utils
from azure.cli.command_modules.batch_extensions import _file_utils as utils
class TestBatchNCJFiles(unittest.TestCase):
# pylint: disable=attribute-defined-outside-init,no-member
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.file_dir = os.path.join(self.data_dir, 'file_tests')
self.win_base = (".\\command_modules\\azure-cli-batch-extensions\\azure\\cli\\"
"command_modules\\batch_extensions\\tests\\data")
self.nix_base = self.win_base.replace('\\', '/')
return super(TestBatchNCJFiles, self).setUp()

    def test_batch_ncj_generate_container_from_filegroup(self):
self.assertEqual(utils.get_container_name("data"), 'fgrp-data')
self.assertEqual(utils.get_container_name("Data"), 'fgrp-data')
self.assertEqual(utils.get_container_name("data__test--"),
"fgrp-data-test-6640b0b7acfec6867ab146c9cf185206b5f0bdcb")
self.assertEqual(utils.get_container_name("data-test-really-long-name-with-no-"
"special-characters-o8724578o2476"),
"fgrp-data-test-reall-cc5bdae242ec8cee81a2b85a35a0f538991472c2")
with self.assertRaises(ValueError):
utils.get_container_name("data-#$%")

    def test_batch_ncj_resolve_filepaths(self):
if os.name == 'nt':
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\*")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\foo.txt")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\*.txt")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\f*.txt")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.win_base + "\\**\\sample_data\\test.txt")
self.assertEqual(resolved[0], self.win_base)
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.win_base + "\\**\\sample_data\\test*.txt")
self.assertEqual(resolved[0], self.win_base)
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.win_base + "\\file_tests\\**\\*.txt")
self.assertEqual(resolved[0], self.win_base + '\\file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/*")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 2)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/foo.txt")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/*.txt")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/f*.txt")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.nix_base + "/**/sample_data/test.txt")
self.assertEqual(resolved[0], self.nix_base)
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.nix_base + "/**/sample_data/test*.txt")
self.assertEqual(resolved[0], self.nix_base)
self.assertEqual(len(resolved[1]), 1)
resolved = utils.resolve_file_paths(self.nix_base + "/file_tests/**/*.txt")
self.assertEqual(resolved[0], self.nix_base + '/file_tests')
self.assertEqual(len(resolved[1]), 2)
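        # Pattern exercised above: resolve_file_paths returns a (base
        # directory, matched files) pair, and accepts a bare directory, a
        # trailing wildcard, a single file name, '*'/'f*' style globs, and
        # recursive '**' segments, for both Windows and POSIX style paths.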

    def test_batch_ncj_transform_resourcefiles_from_filegroup(self):
container = 'fgrp-data'
resource = {
'source': {'fileGroup': 'data'}
}
blobs = [
{'filePath': 'data1.txt', 'url': 'https://blob.fgrp-data/data1.txt'},
{'filePath': 'data2.txt', 'url': 'https://blob.fgrp-data/data2.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0]['blobSource'], "https://blob.fgrp-data/data1.txt")
self.assertEqual(resources[0]['filePath'], "data1.txt")
self.assertEqual(resources[1]['blobSource'], "https://blob.fgrp-data/data2.txt")
self.assertEqual(resources[1]['filePath'], "data2.txt")
resource = {
'source': {'fileGroup': 'data', 'prefix': 'data1.txt'},
'filePath': 'localFile'
}
blobs = [
{'filePath': 'data1.txt', 'url': 'https://blob.fgrp-data/data1.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['blobSource'], "https://blob.fgrp-data/data1.txt")
self.assertEqual(resources[0]['filePath'], "localFile")
resource = {
'source': {'fileGroup': 'data', 'prefix': 'data1'},
'filePath': 'localFile'
}
blobs = [
{'filePath': 'data1.txt', 'url': 'https://blob.fgrp-data/data1.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['blobSource'], "https://blob.fgrp-data/data1.txt")
self.assertEqual(resources[0]['filePath'], "localFile/data1.txt")
resource = {
'source': {'fileGroup': 'data', 'prefix': 'subdir/data'},
'filePath': 'localFile'
}
blobs = [
{'filePath': 'subdir/data1.txt',
'url': 'https://blob.fgrp-data/subdir/data1.txt'},
{'filePath': 'subdir/data2.txt',
'url': 'https://blob.fgrp-data/subdir/data2.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0]['blobSource'],
"https://blob.fgrp-data/subdir/data1.txt")
self.assertEqual(resources[0]['filePath'], "localFile/subdir/data1.txt")
self.assertEqual(resources[1]['blobSource'],
"https://blob.fgrp-data/subdir/data2.txt")
self.assertEqual(resources[1]['filePath'], "localFile/subdir/data2.txt")
resource = {
'source': {'fileGroup': 'data', 'prefix': 'subdir/data'},
'filePath': 'localFile/'
}
blobs = [
{'filePath': 'subdir/data1.txt', 'url':
'https://blob.fgrp-data/subdir/data1.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['blobSource'],
"https://blob.fgrp-data/subdir/data1.txt")
self.assertEqual(resources[0]['filePath'], "localFile/subdir/data1.txt")
resource = {
'source': {'fileGroup': 'data', 'prefix': 'subdir/data'},
}
blobs = [
{'filePath': 'subdir/data1.txt',
'url': 'https://blob.fgrp-data/subdir/data1.txt'},
{'filePath': 'subdir/more/data2.txt',
'url': 'https://blob.fgrp-data/subdir/more/data2.txt'}
]
resources = utils.convert_blobs_to_resource_files(blobs, resource)
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0]['blobSource'],
"https://blob.fgrp-data/subdir/data1.txt")
self.assertEqual(resources[0]['filePath'], "subdir/data1.txt")
self.assertEqual(resources[1]['blobSource'],
"https://blob.fgrp-data/subdir/more/data2.txt")
self.assertEqual(resources[1]['filePath'], "subdir/more/data2.txt")

Diff not shown because the file is too large.

2
setup.cfg Normal file

@@ -0,0 +1,2 @@
[bdist_wheel]
universal=1

60
setup.py Normal file

@@ -0,0 +1,60 @@
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup

VERSION = '0.1.1b1+dev'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-batch==1.1.0',
'azure-mgmt-batch==2.0.0',
'azure-cli-core'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()

setup(
name='azure-cli-batch-extensions',
version=VERSION,
description='Microsoft Azure Command-Line Tools Extended Batch Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
namespace_packages=[
'azure',
'azure.cli',
'azure.cli.command_modules'
],
packages=[
'azure.cli.command_modules.batch_extensions'
],
install_requires=DEPENDENCIES,
)
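Editor's note: the namespace_packages setting lets this distribution share the azure, azure.cli, and azure.cli.command_modules trees with other azure-cli packages in the same environment. A quick post-install sanity check (the editable 'pip install -e .' invocation is an assumption, not part of this diff):

# Assumes the package has been installed, e.g. via 'pip install -e .'
import azure.cli.command_modules.batch_extensions as ext
print(ext.__name__)  # azure.cli.command_modules.batch_extensions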