Merge of vray branch, minus support for VRay license selector on pools

David Kydd 2017-09-08 14:32:20 +12:00
Parents 9c4c0b9f45 ba1702fc05
Commit 7c806c093b
66 changed files: 672 additions and 4730 deletions

View file

@@ -36,48 +36,6 @@
<Compile Include="azure_batch_maya\scripts\assets.py">
<SubType>Code</SubType>
</Compile>
<Compile Include="azure_batch_maya\scripts\batch_extensions\batch_auth.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\batch_extensions_client.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\application_template.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\application_template_info.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\application_template_parameter.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\apt_package_reference.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\auto_pool_specification.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\output_file.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\output_file_auto_storage_destination.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\chocolatey_package_reference.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\constants.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_job_parameter.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_pool_parameter.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_pool_specification.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_resource_file.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_task_parameter.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\file_collection_task_factory.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\file_source.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\job_manager_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\job_preparation_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\job_release_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\merge_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\extended_output_file_destination.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\package_reference_base.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\parameter_set.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\parametric_sweep_task_factory.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\repeat_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\start_task.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\task_collection_task_factory.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\task_factory_base.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\yum_package_reference.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\models\__init__.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\operations\file_operations.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\operations\job_operations.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\operations\pool_operations.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\operations\__init__.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\version.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\_file_utils.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\_job_utils.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\_pool_utils.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\_template_utils.py" />
<Compile Include="azure_batch_maya\scripts\batch_extensions\__init__.py" />
<Compile Include="azure_batch_maya\scripts\config.py">
<SubType>Code</SubType>
</Compile>
@@ -155,9 +113,6 @@
<Folder Include="azure_batch_maya\" />
<Folder Include="azure_batch_maya\scripts\" />
<Folder Include="azure_batch_maya\plug-in\" />
<Folder Include="azure_batch_maya\scripts\batch_extensions\" />
<Folder Include="azure_batch_maya\scripts\batch_extensions\models\" />
<Folder Include="azure_batch_maya\scripts\batch_extensions\operations\" />
<Folder Include="azure_batch_maya\scripts\tools\" />
<Folder Include="azure_batch_maya\scripts\ui\" />
<Folder Include="azure_batch_maya\templates\" />

View file

@@ -1,3 +1,9 @@
2017-08-?? v0.11.0
------------------
- Support for V-Ray for Maya
- Dependency on new Batch Extensions module
- Fixed asset file limitation
2017-07-05 v0.10.0
------------------
- Exposed low priority VM allocation for pools

View file

@@ -0,0 +1,130 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from maya import mel, cmds
import maya.OpenMaya as om
import maya.OpenMayaMPx as omp
import os
import sys
import gzip
import glob
import tempfile
from default import AzureBatchRenderJob, AzureBatchRenderAssets
class VrayRenderJob(AzureBatchRenderJob):
render_engine = "vray"
def __init__(self):
self._renderer = "vray"
self.label = "V-Ray"
def settings(self):
if self.scene_name == '':
job_name = "Untitled"
else:
job_name = str(os.path.splitext(os.path.basename(self.scene_name))[0])
file_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
if file_prefix:
file_prefix = os.path.split(file_prefix)[1]
else:
file_prefix = "<Scene>"
self.job_name = self.display_string("Job Name: ", job_name)
self.output_name = self.display_string("Output Prefix: ", file_prefix)
self.start = self.display_int("Start frame: ", self.start_frame, edit=True)
self.end = self.display_int("End frame: ", self.end_frame, edit=True)
self.step = self.display_int("Frame step: ", self.frame_step, edit=True)
def get_title(self):
return str(cmds.textField(self.job_name, query=True, text=True))
def render_enabled(self):
return True
def get_jobdata(self):
if self.scene_name == '':
raise ValueError("Current Maya scene has not been saved to disk.")
pending_changes = cmds.file(query=True, modified=True)
if not pending_changes:
return self.scene_name, [self.scene_name]
options = {
'save': "Save and continue",
'nosave': "Continue without saving",
'cancel': "Cancel"
}
answer = cmds.confirmDialog(title="Unsaved Changes",
message="There are unsaved changes. Continue?",
button=options.values(),
defaultButton=options['save'],
cancelButton=options['cancel'],
dismissString=options['cancel'])
if answer == options['cancel']:
raise Exception("Submission cancelled")
if answer == options['save']:
cmds.SaveScene()
return self.scene_name, [self.scene_name]
def get_params(self):
params = {}
params['frameStart'] = cmds.intField(self.start, query=True, value=True)
params['frameEnd'] = cmds.intField(self.end, query=True, value=True)
params['frameStep'] = cmds.intField(self.step, query=True, value=True)
params['renderer'] = self._renderer
return params
class VrayRenderAssets(AzureBatchRenderAssets):
assets = []
render_engine = "vray"
file_nodes = {
"VRayScannedMtl": ["file"],
"VRayFastSSS2": ["prepassFileName"],
"VRayMeshMaterial": ["fileName", "overrideFileName"],
"VRayMtlGLSL": ["fileName"],
"VRayMtlOSL": ["fileName"],
"VRaySimbiont": ["file"],
"VRayVRmatMtl": ["fileName"],
"vraySettings": ["pmap_file",
"pmap_file2",
"causticsFile",
"causticsFile2",
"imap_fileName",
"imap_fileName2",
"lc_fileName",
"opt_fileName",
"shr_file_name"]
}
def check_path(self, path):
if '#' in path:
return path.replace('#', '[0-9]')
elif '<udim>' in path:
return path.replace('<udim>', '[0-9][0-9][0-9][0-9]')
elif '<tile>' in path:
return path.replace('<tile>', '_u*_v*')
else:
return path
def renderer_assets(self):
self.assets = []
collected = []
for node_type, attributes in self.file_nodes.items():
nodes = cmds.ls(type=node_type)
for node in nodes:
for attr in attributes:
collected.append(cmds.getAttr(node + "." + attr))
for path in collected:
self.assets.append(self.check_path(path))
return self.assets
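For reference, a minimal standalone sketch of the token expansion performed by check_path above (the renderer classes themselves require the Maya plug-in environment, so this simply mirrors the substitution rules with an illustrative texture path):

# Mirrors check_path: only the first matching token kind is expanded.
def expand_tokens(path):
    if '#' in path:
        return path.replace('#', '[0-9]')
    if '<udim>' in path:
        return path.replace('<udim>', '[0-9][0-9][0-9][0-9]')
    if '<tile>' in path:
        return path.replace('<tile>', '_u*_v*')
    return path

print(expand_tokens('textures/wood_<udim>.exr'))  # textures/wood_[0-9][0-9][0-9][0-9].exr
print(expand_tokens('sequences/frame_#.png'))     # sequences/frame_[0-9].png

The resulting glob-style patterns presumably let the asset collector match every numbered texture on disk rather than only the literal path stored on the node.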

View file

@@ -17,6 +17,7 @@ import glob
import webbrowser
import subprocess
from distutils.version import StrictVersion
from distutils import dir_util
from maya import mel
from maya import cmds
@@ -31,17 +32,20 @@ INSTALL_DIR = os.path.normpath(
os.path.join(cmds.internalVar(userScriptDir=True), 'azure-batch-libs'))
sys.path.append(INSTALL_DIR)
REQUIREMENTS = [
"pathlib==1.0.1",
]
REQUIREMENTS = {
"pathlib==1.0.1": "pathlib",
"msrestazure==0.4.11": "msrestazure",
"azure-common==1.1.8": "azure.common",
}
NAMESPACE_PACKAGES = [
"azure-mgmt-batch==4.0.0",
"azure-mgmt-storage==1.0.0",
"azure-common==1.1.5",
"azure-batch==3.0.0",
"azure-storage==0.32.0",
]
NAMESPACE_PACKAGES = {
"azure-mgmt-batch==4.0.0": "azure.mgmt.batch",
"azure-mgmt-storage==1.0.0": "azure.mgmt.storage",
"azure-batch==3.0.0": "azure.batch",
"azure-storage==0.32.0": "azure.storage",
"azure-batch-extensions==0.2.0": "azure.batch_extensions",
"futures==3.1.1": "concurrent.futures"
}
VERSION = "0.10.0"
EULA_PREF = "AzureBatch_EULA"
@@ -327,20 +331,21 @@ def remove_ui(clientData):
print("Failed to load", (str(e)))
def dependency_installed(package):
def dependency_installed(package, namespace):
"""Check if the specified package is installed and up-to-date.
:param str package: A pip-formatted package reference.
:param str namespace: The importable module namespace used to verify the install.
"""
try:
package_ref = package.split('==')
module = importlib.import_module(package_ref[0].replace('-', '.'))
module = importlib.import_module(namespace)
if hasattr(module, '__version__') and len(package_ref) > 1:
if StrictVersion(package_ref[1]) > StrictVersion(getattr(module, '__version__')):
raise ImportError("Installed package out of date")
except ImportError:
print("Unable to load {}".format(package))
except ImportError as error:
print("Unable to load {}: {}".format(package, error))
return False
else:
print("Successfully loaded {} from path: {}".format(package, module.__file__))
return True
@@ -351,15 +356,19 @@ def install_pkg(package):
TODO: Check if there's a better way to bypass the verification error.
TODO: Check if this works for package upgrades
"""
if not os.path.isdir(INSTALL_DIR):
os.makedirs(INSTALL_DIR)
pip_cmds = ['mayapy', os.path.join(INSTALL_DIR, 'pip'),
'install', package,
'--target', INSTALL_DIR,
'--index-url', 'http://pypi.python.org/simple/',
'--trusted-host', 'pypi.python.org']
print(pip_cmds)
installer = subprocess.Popen(pip_cmds)
installer = subprocess.Popen(pip_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
installer.wait()
if installer.returncode != 0:
print(installer.stdout.read())
print(installer.stderr.read())
raise RuntimeError("Failed to install package: {}".format(package))
@@ -372,23 +381,30 @@ def install_namespace_pkg(package, namespace):
:param str namespace: The package namespace to unpack to.
"""
temp_target = os.path.join(INSTALL_DIR, 'temp-target')
if not os.path.isdir(temp_target):
os.makedirs(temp_target)
pip_cmds = ['mayapy', os.path.join(INSTALL_DIR, 'pip'),
'install', package,
'--no-deps',
'--target', temp_target,
'--index-url', 'http://pypi.python.org/simple/',
'--trusted-host', 'pypi.python.org']
installer = subprocess.Popen(pip_cmds)
print(pip_cmds)
installer = subprocess.Popen(pip_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
installer.wait()
if installer.returncode == 0:
try:
shutil.copytree(os.path.join(temp_target, namespace), os.path.join(INSTALL_DIR, namespace))
except Exception as e:
print(e)
dir_util.copy_tree(os.path.join(temp_target, namespace), os.path.join(INSTALL_DIR, namespace))
except Exception as exp:
print(exp)
try:
shutil.rmtree(temp_target)
except Exception as e:
print(e)
except Exception as exp:
print(exp)
else:
print(installer.stdout.read())
print(installer.stderr.read())
raise RuntimeError("Failed to install package: {}".format(package))
def initializePlugin(obj):
@@ -410,11 +426,13 @@ def initializePlugin(obj):
print("Checking for dependencies...")
missing_libs = []
python_path = os.environ['PYTHONPATH'].lstrip(os.pathsep)
os.environ['PYTHONPATH'] = INSTALL_DIR + os.pathsep + python_path
for package in REQUIREMENTS:
if not dependency_installed(package):
if not dependency_installed(package, REQUIREMENTS[package]):
missing_libs.append(package)
for package in NAMESPACE_PACKAGES:
if not dependency_installed(package):
if not dependency_installed(package, NAMESPACE_PACKAGES[package]):
missing_libs.append(package)
if missing_libs:
message = ("One or more dependencies are missing or out-of-date."
@@ -433,11 +451,13 @@ def initializePlugin(obj):
print("Attempting to install dependencies via Pip.")
try:
os.environ['PYTHONPATH'] = INSTALL_DIR + os.pathsep + os.environ['PYTHONPATH']
install_script = os.path.normpath(os.path.join( os.environ['AZUREBATCH_TOOLS'], 'install_pip.py'))
installer = subprocess.Popen(["mayapy", install_script, '--target', INSTALL_DIR])
installer = subprocess.Popen(["mayapy", install_script, '--target', INSTALL_DIR],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
installer.wait()
if installer.returncode != 0:
print(installer.stdout.read())
print(installer.stderr.read())
raise RuntimeError("Failed to install pip")
except BaseException as exp:
print("Failed to install Pip. Please install dependencies manually to continue.")
@@ -445,10 +465,12 @@ def initializePlugin(obj):
try:
print("Installing dependencies")
for package in missing_libs:
install_pkg(package)
if package in NAMESPACE_PACKAGES:
package_path = package.split('==')[0].split('-')
package_path = NAMESPACE_PACKAGES[package].split('.')
install_namespace_pkg(package, os.path.join(*package_path))
else:
install_pkg(package)
shutil.copy(os.path.join(INSTALL_DIR, 'azure', '__init__.py'), os.path.join(INSTALL_DIR, 'azure', 'mgmt', '__init__.py'))
except:
error = "Failed to install dependencies - please install manually"
cmds.confirmDialog(message=error, button='OK')

View file

@@ -16,6 +16,8 @@ import tempfile
import pathlib
from Queue import Queue
from azure.batch_extensions import _file_utils as fileutils
from api import MayaAPI as maya
from api import MayaCallbacks as callback
@@ -256,6 +258,18 @@ class AzureBatchAssets(object):
self._set_searchpaths()
self._assets = Assets(self.batch)
def generate_sas_token(self, file_group):
"""Generate SAS token for file group container with read and list
permissions.
TODO: Move this into BatchExtensions file utils.
"""
container_name = fileutils.get_container_name(file_group)
container_url = fileutils.generate_container_sas_token(
container_name,
self.batch.file.get_storage_client(),
permission='rl')
return container_url
def set_assets(self):
"""Gather the asset references of the scene for display in the
asset tab. Called on loading and refreshing the asset tab.

View file

@@ -1,14 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .batch_extensions_client import BatchExtensionsClient
from .version import VERSION
__all__ = ['BatchExtensionsClient']
__version__ = VERSION

View file

@@ -1,326 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import re
import hashlib
import datetime
import copy
import pathlib
from six.moves.urllib.parse import urlsplit # pylint: disable=import-error
from six.moves.urllib.parse import quote # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.storage import StorageManagementClient
from azure.storage import CloudStorageAccount
from azure.storage.blob import BlobPermissions, BlockBlobService
from azure.mgmt.batch import BatchManagementClient
from . import models
def construct_sas_url(blob, uri):
"""Make up blob URL with container URL"""
newuri = copy.copy(uri)
newuri.pathname = '{}/{}'.format(uri.path, quote(blob.name))
return newuri.geturl()
def convert_blobs_to_resource_files(blobs, resource_properties):
"""Convert a list of blobs to a list of ResourceFiles"""
resource_files = []
if not blobs:
raise ValueError('No input data found with reference {}'.
format(resource_properties.source.prefix))
try:
prefix = resource_properties.source.prefix
except AttributeError:
prefix = None
if len(blobs) == 1 and blobs[0]['filePath'] == prefix:
# Single file reference: filePath should be treated as file path
file_path = resource_properties.file_path if resource_properties.file_path \
else blobs[0]['filePath']
resource_files.append(models.ExtendedResourceFile(
blob_source=blobs[0]['url'],
file_path=file_path,
))
else:
# Multiple file reference: filePath should be treated as a directory
base_file_path = ''
if resource_properties.file_path:
base_file_path = '{}/'.format(
FileUtils.STRIP_PATH.sub('', resource_properties.file_path))
for blob in blobs:
file_path = '{}{}'.format(base_file_path, blob['filePath'])
resource_files.append(models.ExtendedResourceFile(
blob_source=blob['url'],
file_path=file_path
))
# Add filemode to every resourceFile
if resource_properties.file_mode:
for f in resource_files:
f.file_mode = resource_properties.file_mode
return resource_files
def resolve_file_paths(local_path):
"""Generate list of files to upload and the relative directory"""
#local_path = FileUtils.STRIP_PATH.sub("", local_path) #TODO
files = []
if local_path.find('*') > -1:
# Supplied path is a pattern - relative directory will be the
# path up to the first wildcard
ref_dir_str = local_path.split('*')[0]
#ref_dir_str = FileUtils.STRIP_PATH.sub("", ref_dir_str) #TODO
if not os.path.isdir(ref_dir_str):
ref_dir_str = os.path.dirname(ref_dir_str)
ref_dir = pathlib.Path(ref_dir_str)
pattern = local_path[len(ref_dir_str + os.pathsep):]
files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]
local_path = ref_dir_str
else:
if os.path.isdir(local_path):
# Supplied path is a directory
files = [os.path.join(local_path, f) for f in os.listdir(local_path)
if os.path.isfile(os.path.join(local_path, f))]
elif os.path.isfile(local_path):
# Supplied path is a file
files.append(local_path)
local_path = os.path.dirname(local_path)
return local_path, files
def resolve_remote_paths(blob_service, file_group, remote_path):
blobs = blob_service.list_blobs(_get_container_name(file_group), prefix=remote_path)
return list(blobs)
def generate_container_name(file_group):
"""Generate valid container name from file group name."""
file_group = file_group.lower()
# Check for any chars that aren't 'a-z', '0-9' or '-'
valid_chars = r'^[a-z0-9][-a-z0-9]*$'
# Replace any underscores or double-hyphens with single hyphen
underscores_and_hyphens = r'[_-]+'
clean_group = re.sub(underscores_and_hyphens, '-', file_group)
clean_group = clean_group.rstrip('-')
if not re.match(valid_chars, clean_group):
raise ValueError('File group name \'{}\' contains illegal characters. '
'File group names only support alphanumeric characters, '
'underscores and hyphens.'.format(file_group))
if clean_group == file_group and len(file_group) <= FileUtils.MAX_GROUP_LENGTH:
# If specified group name is clean, no need to add hash
return file_group
else:
# If we had to transform the group name, add hash of original name
hash_str = hashlib.sha1(file_group.encode()).hexdigest()
new_group = '{}-{}'.format(clean_group, hash_str)
if len(new_group) > FileUtils.MAX_GROUP_LENGTH:
return '{}-{}'.format(clean_group[0:15], hash_str)
return new_group
def _get_container_name(file_group):
"""Get valid container name from file group name with prefix."""
return '{}{}'.format(FileUtils.GROUP_PREFIX, generate_container_name(file_group))
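A rough, self-contained sketch of the naming rules above (hypothetical group names; the suffix is the SHA-1 hex digest of the lowercased original name, and the illegal-character check is omitted for brevity):

import hashlib
import re

def sketch_container_name(file_group, max_len=58):
    # Lowercase, collapse '_'/'-' runs to single hyphens, trim trailing hyphens.
    file_group = file_group.lower()
    clean = re.sub(r'[_-]+', '-', file_group).rstrip('-')
    if clean == file_group and len(file_group) <= max_len:
        return 'fgrp-' + file_group
    suffix = hashlib.sha1(file_group.encode()).hexdigest()
    prefix = clean[:15] if len(clean) + 41 > max_len else clean
    return 'fgrp-{}-{}'.format(prefix, suffix)

print(sketch_container_name('rendering-assets'))  # fgrp-rendering-assets
print(sketch_container_name('My_Scene_Assets'))   # fgrp-my-scene-assets-<sha1 hex>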
def _generate_blob_sas_token(blob, container, blob_service, permission=BlobPermissions.READ):
"""Generate a blob URL with SAS token."""
sas_token = blob_service.generate_blob_shared_access_signature(
container, blob.name,
permission=permission,
start=datetime.datetime.utcnow(),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
return blob_service.make_blob_url(container, quote(blob.name), sas_token=sas_token)
def _generate_container_sas_token(container, blob_service, permission=BlobPermissions.WRITE):
"""Generate a container URL with SAS token."""
blob_service.create_container(container)
sas_token = blob_service.generate_container_shared_access_signature(
container,
permission=permission,
start=datetime.datetime.utcnow(),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
url = '{}://{}/{}?{}'.format(
blob_service.protocol,
blob_service.primary_endpoint,
container,
sas_token)
return url
def download_blob(blob, file_group, destination, blob_service, progress_callback):
"""Download the specified file to the specified container"""
blob_service.get_blob_to_path(
_get_container_name(file_group), blob, destination,
progress_callback=progress_callback)
def upload_blob(source, destination, file_name, # pylint: disable=too-many-arguments
blob_service, remote_path=None, flatten=None, progress_callback=None):
"""Upload the specified file to the specified container"""
if not os.path.isfile(source):
raise ValueError('Failed to locate file {}'.format(source))
statinfo = os.stat(source)
if statinfo.st_size > 50000 * 4 * 1024 * 1024:
raise ValueError('The local file size {} exceeds the Azure blob size limit'.
format(statinfo.st_size))
if flatten:
# Flatten local directory structure
file_name = os.path.basename(file_name)
# Create upload container with sanitized file group name
container_name = _get_container_name(destination)
blob_service.create_container(container_name)
blob_name = file_name
if remote_path:
# Add any specified virtual directories
blob_prefix = FileUtils.STRIP_PATH.sub('', remote_path)
blob_name = '{}/{}'.format(blob_prefix, FileUtils.STRIP_PATH.sub('', file_name))
blob_name = blob_name.replace('\\', '/')
# We store the lastmodified timestamp in order to prevent overwriting with
# out-dated or duplicate data. TODO: Investigate cleaner options for handling this.
file_time = str(os.path.getmtime(source))
metadata = None
try:
metadata = blob_service.get_blob_metadata(container_name, blob_name)
except Exception: # pylint: disable=broad-except
# check notfound
pass
else:
#TODO: Check whether the blob metadata is more recent
if metadata and metadata['lastmodified']:
if metadata['lastmodified'] == file_time:
return
# Upload block blob
# TODO: Investigate compression + chunking performance enhancement proposal.
blob_service.create_blob_from_path(
container_name=container_name,
blob_name=blob_name,
file_path=source,
progress_callback=progress_callback,
metadata={'lastmodified': file_time},
# We want to validate the file as we upload, and only complete the operation
# if all the data transfers successfully
validate_content=True,
max_connections=FileUtils.PARALLEL_OPERATION_THREAD_COUNT)
class FileUtils(object):
STRIP_PATH = re.compile(r"^[\/\\]+|[\/\\]+$")
GROUP_PREFIX = 'fgrp-'
MAX_GROUP_LENGTH = 63 - len(GROUP_PREFIX)
MAX_FILE_SIZE = 50000 * 4 * 1024 * 1024
PARALLEL_OPERATION_THREAD_COUNT = 5
SAS_EXPIRY_DAYS = 7 # 7 days
ROUND_DATE = 2 * 60 * 1000 # Round to nearest 2 minutes
def __init__(self, get_storage_client):
self.resource_file_cache = {}
self.container_sas_cache = {}
self.resolve_storage_account = get_storage_client
def filter_resource_cache(self, container, prefix):
"""Return all blob refeferences in a container cache that meet a prefix requirement."""
filtered = []
for blob in self.resource_file_cache[container]:
if not prefix:
filtered.append(blob)
elif blob['filePath'].startswith(prefix):
filtered.append(blob)
return filtered
def list_container_contents(self, source, container, blob_service):
"""List blob references in container."""
if container not in self.resource_file_cache:
self.resource_file_cache[container] = []
blobs = blob_service.list_blobs(container)
for blob in blobs:
if source.file_group:
blob_sas = _generate_blob_sas_token(blob, container, blob_service)
elif source.container_url:
blob_sas = construct_sas_url(blob, urlsplit(source.container_url))
elif source.url:
blob_sas = source.url
else:
raise ValueError("FileSource has no file source.")
file_name = os.path.basename(blob.name)
file_name_only = os.path.splitext(file_name)[0]
self.resource_file_cache[container].append(
{'url': blob_sas,
'filePath': blob.name,
'fileName': file_name,
'fileNameWithoutExtension': file_name_only})
return self.filter_resource_cache(container, source.prefix)
def get_container_sas(self, file_group_name):
storage_client = self.resolve_storage_account()
container = _get_container_name(file_group_name)
try:
return self.container_sas_cache[container]
except KeyError:
self.container_sas_cache[container] = _generate_container_sas_token(container, storage_client)
return self.container_sas_cache[container]
def get_container_list(self, source):
"""List blob references in container."""
if source.file_group:
# Input data stored in auto-storage
storage_client = self.resolve_storage_account()
container = _get_container_name(source.file_group)
elif source.container_url:
uri = urlsplit(source.container_url)
if not uri.query:
raise ValueError('Invalid container url.')
storage_account_name = uri.netloc.split('.')[0]
sas_token = uri.query
storage_client = BlockBlobService(account_name=storage_account_name,
sas_token=sas_token)
container = uri.pathname.split('/')[1]
else:
raise ValueError('Unknown source.')
return self.list_container_contents(source, container, storage_client)
def resolve_resource_file(self, resource_file):
"""Convert new resourceFile reference to server-supported reference"""
if resource_file.blob_source:
# Support original resourceFile reference
if not resource_file.file_path:
raise ValueError('Malformed ResourceFile: \'blobSource\' must '
'also have \'file_path\' attribute')
return [resource_file]
if not hasattr(resource_file, 'source') or not resource_file.source:
raise ValueError('Malformed ResourceFile: Must have either '
' \'source\' or \'blobSource\'')
storage_client = self.resolve_storage_account()
container = None
blobs = []
if resource_file.source.file_group:
# Input data stored in auto-storage
container = _get_container_name(resource_file.source.file_group)
blobs = self.list_container_contents(resource_file.source, container, storage_client)
return convert_blobs_to_resource_files(blobs, resource_file)
elif resource_file.source.container_url:
# Input data storage in arbitrary container
uri = urlsplit(resource_file.source.container_url)
container = uri.pathname.split('/')[1]
blobs = self.list_container_contents(resource_file.source, container, storage_client)
return convert_blobs_to_resource_files(blobs, resource_file)
elif resource_file.source.url:
# TODO: Input data from an arbitrary HTTP GET source
raise ValueError('Not implemented')
else:
raise ValueError('Malformed ResourceFile')

View file

@@ -1,99 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.exceptions import ValidationError, ClientRequestError
from azure.batch.models import BatchErrorException
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue
# pylint: disable=too-few-public-methods
def _handle_batch_exception(action):
try:
return action()
except BatchErrorException as ex:
try:
message = ex.error.message.value
if ex.error.values:
for detail in ex.error.values:
message += "\n{}: {}".format(detail.key, detail.value)
raise Exception(message)
except AttributeError:
raise Exception(ex)
except (ValidationError, ClientRequestError) as ex:
raise Exception(ex)
def _bulk_add_tasks(client, job_id, tasks, queue):
added_tasks = client.add_collection(job_id, tasks)
for task in added_tasks.value:
queue.put(task)
def deploy_tasks(client, job_id, tasks, threads):
MAX_TASKS_COUNT_IN_BATCH = 100
submit_threads = threads or 10
def add_task():
start = 0
progress_queue = Queue()
submitting_tasks = []
submitted_tasks = []
while True:
end = min(start + MAX_TASKS_COUNT_IN_BATCH, len(tasks))
submit = threading.Thread(target=_bulk_add_tasks, args=(client, job_id, tasks[start:end], progress_queue))
submit.start()
submitting_tasks.append(submit)
start = end
if start >= len(tasks) or len(submitting_tasks) >= submit_threads:
while any(s for s in submitting_tasks if s.is_alive()) or not progress_queue.empty():
submitted_tasks.append(progress_queue.get())
progress_queue.task_done()
submitting_tasks = []
if start >= len(tasks):
break
return submitted_tasks
_handle_batch_exception(add_task)
def get_task_counts(client, job_id):
task_counts = {
'active': 0,
'running': 0,
'completed': 0
}
def action():
result = client.task.list(job_id, select='id, state')
for task in result:
if task.state in ['active', 'running', 'completed']:
task_counts[task.state] += 1
else:
raise ValueError('Invalid task state')
return task_counts
return _handle_batch_exception(action)
def get_target_pool(client, job):
def action():
return client.get(job.pool_info.pool_id)
if not job.pool_info:
raise ValueError('Missing required poolInfo.')
pool = None
if job.pool_info.pool_id:
pool = _handle_batch_exception(action)
elif job.pool_info.auto_pool_specification \
and job.pool_info.auto_pool_specification.pool:
pool = job.pool_info.auto_pool_specification.pool
else:
raise ValueError('Missing required poolId or autoPoolSpecification.pool.')
return pool

View file

@@ -1,28 +0,0 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
# pylint: disable=too-few-public-methods
class PoolOperatingSystemFlavor(Enum):
WINDOWS = 'windows'
LINUX = 'linux'
def get_pool_target_os_type(pool):
try:
image_publisher = pool.virtual_machine_configuration.image_reference.publisher
sku_id = pool.virtual_machine_configuration.node_agent_sku_id
except AttributeError:
image_publisher = None
sku_id = None
return PoolOperatingSystemFlavor.WINDOWS \
if not image_publisher \
or (image_publisher and image_publisher.lower().find('windows') >= 0) \
or (sku_id and sku_id.lower().find('windows') >= 0) \
else PoolOperatingSystemFlavor.LINUX

File diff not shown because it is too large

View file

@@ -1,128 +0,0 @@
# coding=utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
import keyring
import ast
import base64
import hmac
import hashlib
import datetime
import requests
from requests.auth import AuthBase
from msrest.authentication import Authentication
from msrest import Serializer
from msrest.serialization import TZ_UTC
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
class SharedKeyAuth(AuthBase):
headers_to_sign = [
'content-encoding',
'content-language',
'content-length',
'content-md5',
'content-type',
'date',
'if-modified-since',
'if-match',
'if-none-match',
'if-unmodified-since',
'range']
def __init__(self, header, account_name, key):
self._header = header
self._account_name = account_name
self._key = key
def __call__(self, request):
if not request.headers.get('ocp-date'):
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=TZ_UTC)
request.headers['ocp-date'] = Serializer.serialize_rfc(now)
url = urlparse(request.url)
uri_path = url.path
uri_path = uri_path.replace('%5C', '/')
uri_path = uri_path.replace('%2F', '/')
# method to sign
string_to_sign = request.method + '\n'
# get headers to sign
request_header_dict = {
key.lower(): val for key, val in request.headers.items() if val}
request_headers = [
str(request_header_dict.get(x, '')) for x in self.headers_to_sign]
string_to_sign += '\n'.join(request_headers) + '\n'
# get ocp- header to sign
ocp_headers = []
for name, value in request.headers.items():
if 'ocp-' in name and value:
ocp_headers.append((name.lower(), value))
for name, value in sorted(ocp_headers):
string_to_sign += "{}:{}\n".format(name, value)
# get account_name and uri path to sign
string_to_sign += "/{}{}".format(self._account_name, uri_path)
# get query string to sign if it is not table service
query_to_sign = parse_qs(url.query)
for name in sorted(query_to_sign.keys()):
value = query_to_sign[name][0]
if value:
string_to_sign += "\n{}:{}".format(name, value)
# sign the request
auth_string = "SharedKey {}:{}".format(
self._account_name, self._sign_string(string_to_sign))
request.headers[self._header] = auth_string
return request
def _sign_string(self, string_to_sign):
_key = self._key.encode('utf-8')
string_to_sign = string_to_sign.encode('utf-8')
try:
key = base64.b64decode(_key)
except TypeError:
raise ValueError("Invalid key value: {}".format(self._key))
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
return base64.b64encode(digest).decode('utf-8')
class SharedKeyCredentials(Authentication):
def __init__(self, account_name, key):
super(SharedKeyCredentials, self).__init__()
self.auth = SharedKeyAuth(self.header, account_name, key)
def signed_session(self):
session = super(SharedKeyCredentials, self).signed_session()
session.auth = self.auth
return session

View file

@@ -1,131 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.pool_operations import ExtendedPoolOperations
from .operations.job_operations import ExtendedJobOperations
from .operations.file_operations import ExtendedFileOperations
from azure.batch.operations.application_operations import ApplicationOperations
from azure.batch.operations.account_operations import AccountOperations
from azure.batch.operations.certificate_operations import CertificateOperations
from azure.batch.operations.job_schedule_operations import JobScheduleOperations
from azure.batch.operations.task_operations import TaskOperations
from azure.batch.operations.compute_node_operations import ComputeNodeOperations
from . import models
from azure.batch import BatchServiceClient
from azure.mgmt.batch import BatchManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage import CloudStorageAccount
class BatchExtensionsClient(BatchServiceClient):
"""A client for issuing REST requests to the Azure Batch service.
:ivar config: Configuration for client.
:vartype config: BatchServiceClientConfiguration
:ivar pool: Pool operations
:vartype pool: .operations.PoolOperations
:ivar account: Account operations
:vartype account: .operations.AccountOperations
:ivar job: Job operations
:vartype job: .operations.JobOperations
:ivar file: File operations
:vartype file: .operations.FileOperations
:ivar task: Task operations
:vartype task: .operations.TaskOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param api_version: Client API Version.
:type api_version: str
:param str base_url: Service URL
"""
def __init__(self, credentials, base_url, subscription_id=None, resource_group=None, storage_client=None):
super(BatchExtensionsClient, self).__init__(credentials, base_url=base_url)
self.config.add_user_agent('batchextensionsclient/{}'.format(VERSION))
self._mgmt_client = None
self._resolved_storage_client = storage_client
self._account = credentials.auth._account_name
self._subscription = subscription_id
self._resource_group = resource_group
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.pool = ExtendedPoolOperations(
self, self._client, self.config, self._serialize, self._deserialize, self._storage_account)
self.job = ExtendedJobOperations(
self, self._client, self.config, self._serialize, self._deserialize, self._storage_account)
self.file = ExtendedFileOperations(
self, self._client, self.config, self._serialize, self._deserialize, self._storage_account)
self.application = ApplicationOperations(
self._client, self.config, self._serialize, self._deserialize)
self.account = AccountOperations(
self._client, self.config, self._serialize, self._deserialize)
self.certificate = CertificateOperations(
self._client, self.config, self._serialize, self._deserialize)
self.job_schedule = JobScheduleOperations(
self._client, self.config, self._serialize, self._deserialize)
self.task = TaskOperations(
self._client, self.config, self._serialize, self._deserialize)
self.compute_node = ComputeNodeOperations(
self._client, self.config, self._serialize, self._deserialize)
def _storage_account(self):
"""Resolve Auto-Storage account from supplied Batch Account"""
if self._resolved_storage_client:
return self._resolved_storage_client
if not self._subscription:
raise ValueError("Unable to resolve auto-storage account without subscription ID.")
client = self._mgmt_client if self._mgmt_client else BatchManagementClient(
self.config._creds, self._subscription)
if self._resource_group:
# If a resource group was supplied, we can use that to query the Batch Account
try:
account = client.batch_account.get(self._resource_group, self._account)
except CloudError:
raise ValueError('Couldn\'t find the account named {} in subscription {} '
'with resource group {}'.format(
self._account, self._subscription, self._resource_group))
else:
# Otherwise, we need to parse the URL for a region in order to identify
# the Batch account in the subscription
# Example URL: https://batchaccount.westus.batch.azure.com
region = urlsplit(self.config.base_url).netloc.split('.', 2)[1]
accounts = (x for x in client.batch_account.list()
if x.name == self._account and x.location == region)
try:
account = next(accounts)
except StopIteration:
raise ValueError('Couldn\'t find the account named {} in subscription {} '
'in region {}'.format(
self._account, self._subscription, region))
if not account.auto_storage:
raise ValueError('No linked auto-storage for account {}'.format(self._account))
storage_account_info = account.auto_storage.storage_account_id.split('/') # pylint: disable=no-member
storage_resource_group = storage_account_info[4]
storage_account = storage_account_info[8]
storage_client = StorageManagementClient(self.config._creds, self._subscription)
keys = storage_client.storage_accounts.list_keys(storage_resource_group, storage_account)
storage_key = keys.keys[0].value # pylint: disable=no-member
self._resolved_storage_client = CloudStorageAccount(storage_account, storage_key)\
.create_block_blob_service()
return self._resolved_storage_client

View file

@@ -1,49 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# Not ideal syntax - but saves us having to check and repopulate this
# list every time the SDK is regenerated.
from azure.batch.models import *
from azure.batch.models.batch_service_client_enums import *
from .extended_task_parameter import ExtendedTaskParameter
from .extended_job_parameter import ExtendedJobParameter
from .extended_pool_parameter import ExtendedPoolParameter
from .extended_pool_specification import ExtendedPoolSpecification
from .auto_pool_specification import AutoPoolSpecification
from .output_file import OutputFile
from .extended_output_file_destination import ExtendedOutputFileDestination
from .output_file_auto_storage_destination import OutputFileAutoStorageDestination
from .extended_resource_file import ExtendedResourceFile
from .file_source import FileSource
from .task_factory_base import TaskFactoryBase
from .task_collection_task_factory import TaskCollectionTaskFactory
from .parametric_sweep_task_factory import ParametricSweepTaskFactory
from .file_collection_task_factory import FileCollectionTaskFactory
from .parameter_set import ParameterSet
from .repeat_task import RepeatTask
from .package_reference_base import PackageReferenceBase
from .chocolatey_package_reference import ChocolateyPackageReference
from .yum_package_reference import YumPackageReference
from .apt_package_reference import AptPackageReference
from .application_template_info import ApplicationTemplateInfo
from .merge_task import MergeTask
from .job_preparation_task import JobPreparationTask
from .job_release_task import JobReleaseTask
from .job_manager_task import JobManagerTask
from .start_task import StartTask
from .application_template import ApplicationTemplate
from .constants import (
PROPS_RESERVED_FOR_JOBS,
PROPS_PERMITTED_ON_TEMPLATES,
ROOT_FILE_UPLOAD_URL,
FILE_EGRESS_OVERRIDE,
FILE_EGRESS_ENV_NAME,
FILE_EGRESS_PREFIX,
FILE_EGRESS_RESOURCES)

View file

@@ -1,111 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationTemplate(Model):
"""An Azure Batch Application Template.
:param job_manager_task: Details of a Job Manager task to be launched when
the job is started. If the job does not specify a Job Manager task, the
user must explicitly add tasks to the job. If the job does specify a Job
Manager task, the Batch service creates the Job Manager task when the job
is created, and will try to schedule the Job Manager task before
scheduling other tasks in the job. The Job Manager task's typical purpose
is to control and/or monitor job execution, for example by deciding what
additional tasks to run, determining when the work is complete, etc.
(However, a Job Manager task is not restricted to these activities - it is
a fully-fledged task in the system and can perform whatever actions are
required for the job.) For example, a Job Manager task might download a
file specified as a parameter, analyze the contents of that file and
submit additional tasks based on those contents.
:type job_manager_task: :class:`JobManagerTask
<azure.batch.models.JobManagerTask>`
:param job_preparation_task: The Job Preparation task. If a job has a Job
Preparation task, the Batch service will run the Job Preparation task on a
compute node before starting any tasks of that job on that compute node.
:type job_preparation_task: :class:`JobPreparationTask
<azure.batch.models.JobPreparationTask>`
:param job_release_task: The Job Release task. A Job Release task cannot
be specified without also specifying a Job Preparation task for the job.
The Batch service runs the Job Release task on the compute nodes that have
run the Job Preparation task. The primary purpose of the Job Release task
is to undo changes to compute nodes made by the Job Preparation task.
Example activities include deleting local files, or shutting down services
that were started as part of job preparation.
:type job_release_task: :class:`JobReleaseTask
<azure.batch.models.JobReleaseTask>`
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all tasks in
the job (including the Job Manager, Job Preparation and Job Release
tasks).
:type common_environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. Note that if a job
contains no tasks, then all tasks are considered complete. This option is
therefore most commonly used with a Job Manager task; if you want to use
automatic job termination without a Job Manager, you should initially set
onAllTasksComplete to noAction and update the job properties to set
onAllTasksComplete to terminateJob once you have finished adding tasks.
Permitted values are: noAction - do nothing. The job remains active unless
terminated or disabled by some other means. terminateJob - terminate the
job. The job's terminateReason is set to 'AllTasksComplete'. The default
is noAction. Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or :class:`OnAllTasksComplete
<azure.batch.models.OnAllTasksComplete>`
:param on_task_failure: The action the Batch service should take when any
task in the job fails. A task is considered to have failed if it completes
with a non-zero exit code and has exhausted its retry count, or if it had
a scheduling error. noAction - do nothing. performExitOptionsJobAction -
take the action associated with the task exit condition in the task's
exitConditions collection. (This may still result in no action being
taken, if that is what the task specifies.) The default is noAction.
Possible values include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or :class:`OnTaskFailure
<azure.batch.models.OnTaskFailure>`
:param metadata: A list of name-value pairs associated with the job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param uses_task_dependencies: The flag that determines if this job will
use tasks with dependencies.
:type uses_task_dependencies: bool
:param task_factory: A task factory reference to automatically generate a set of
tasks to be added to the job.
:type task_factory: :class:`TaskFactoryBase
<azure.batch_extensions.models.TaskFactoryBase>`
"""
_attribute_map = {
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'},
}
def __init__(self, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None,
on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies=None,
task_factory=None):
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.metadata = metadata
self.uses_task_dependencies = uses_task_dependencies
self.task_factory = task_factory

View file

@@ -1,46 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from msrest.serialization import Model
class ApplicationTemplateInfo(Model):
"""A reference to an Azure Batch Application Template.
:param str file_path: The path to an application template file. This can
be a full path, or relative to the current working directory. Alternatively
a relative directory can be supplied with the 'current_directory' argument.
A ValueError will be raised if the supplied file path cannot be found.
:param dict parameters: A dictionary of parameter names and values to be
substituted into the application template.
"""
_validation = {
'file_path': {'required': True},
}
_attribute_map = {
'file_path': {'key': 'filePath', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'object'},
}
def __init__(self, file_path, parameters=None, current_directory="."):
self.file_path = file_path
if not os.path.isfile(file_path):
self.file_path = os.path.abspath(os.path.join(current_directory, str(file_path)))
self.parameters = parameters
# Rule: Template file must exist
# (We do this in order to give a good diagnostic in the most common case, knowing that this is
# technically a race condition because someone could delete the file between our check here and
# reading the file later on. We expect such cases to be rare.)
try:
with open(self.file_path, 'r'):
pass
except EnvironmentError as error:
raise ValueError("Unable to read the template '{}': {}".format(self.file_path, error))

View file

@@ -1,24 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationTemplateParameter(Model):
_attribute_map = {
'file_group': {'key': 'fileGroup', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'container_url': {'key': 'containerUrl', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
}
def __init__(self, file_group=None, url=None, container_url=None, prefix=None):
self.file_group = file_group
self.url = url
self.container_url = container_url
self.prefix = prefix

View file

@@ -1,33 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .package_reference_base import PackageReferenceBase
class AptPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the APT package
manager on a Linux node (apt-get).
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, id, version=None):
super(AptPackageReference, self).__init__(id=id, version=version)
self.type = 'aptPackage'

View file

@@ -1,57 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AutoPoolSpecification(Model):
"""Specifies characteristics for a temporary 'auto pool'. The Batch service
will create this auto pool when the job is submitted.
:param auto_pool_id_prefix: A prefix to be added to the unique identifier
when a pool is automatically created. The Batch service assigns each auto
pool a unique identifier on creation. To distinguish between pools created
for different purposes, you can specify this element to add a prefix to
the id that is assigned. The prefix can be up to 20 characters long.
:type auto_pool_id_prefix: str
:param pool_lifetime_option: The minimum lifetime of created auto pools,
and how multiple jobs on a schedule are assigned to pools. When the pool
lifetime scope is jobSchedule level, the Batch service keeps track of the
last autopool created for the job schedule, and deletes that pool when the
job schedule completes. Batch will also delete this pool if the user
updates the auto pool specification in a way that changes this lifetime.
Possible values include: 'jobSchedule', 'job'
:type pool_lifetime_option: str or :class:`PoolLifetimeOption
<azure.batch.models.PoolLifetimeOption>`
:param keep_alive: Whether to keep an auto pool alive after its lifetime
expires. If false, the Batch service deletes the pool once its lifetime
(as determined by the poolLifetimeOption setting) expires; that is, when
the job or job schedule completes. If true, the Batch service does not
delete the pool automatically. It is up to the user to delete auto pools
created with this option.
:type keep_alive: bool
:param pool: The pool specification for the auto pool.
:type pool: :class:`PoolSpecification
<azure.batch.models.PoolSpecification>`
"""
_validation = {
'pool_lifetime_option': {'required': True},
}
_attribute_map = {
'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'},
'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'},
'keep_alive': {'key': 'keepAlive', 'type': 'bool'},
'pool': {'key': 'pool', 'type': 'ExtendedPoolSpecification'},
}
def __init__(self, pool_lifetime_option, auto_pool_id_prefix=None, keep_alive=None, pool=None):
self.auto_pool_id_prefix = auto_pool_id_prefix
self.pool_lifetime_option = pool_lifetime_option
self.keep_alive = keep_alive
self.pool = pool

View file

@@ -1,37 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .package_reference_base import PackageReferenceBase
class ChocolateyPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the Chocolatey package
manager on a Windows node.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
:param bool allow_empty_checksums: Whether Chocolatey will install packages
without a checksum for validation. Default is false.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'allow_empty_checksums': {'key': 'allowEmptyChecksums', 'type': 'bool'}
}
def __init__(self, id, version=None, allow_empty_checksums=None):
super(ChocolateyPackageReference, self).__init__(id=id, version=version)
self.allow_empty_checksums = allow_empty_checksums
self.type = 'chocolateyPackage'
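A brief construction sketch covering this class and the AptPackageReference above (illustrative package ids; same assumed import path as before):

from azure.batch_extensions.models import (
    AptPackageReference, ChocolateyPackageReference)

# Chocolatey reference for Windows pools, APT reference for Linux pools.
choco_pkg = ChocolateyPackageReference(id='blender', version='2.78', allow_empty_checksums=True)
apt_pkg = AptPackageReference(id='imagemagick')   # latest version from the repository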

View file

@@ -1,80 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
ROOT_FILE_UPLOAD_URL = 'https://raw.githubusercontent.com/Azure/azure-batch-cli-extensions/master'
FILE_EGRESS_OVERRIDE = 'FILE_EGRESS_OVERRIDE_URL'
FILE_EGRESS_ENV_NAME = 'AZ_BATCH_FILE_UPLOAD_CONFIG'
FILE_EGRESS_PREFIX = 'azure/cli/command_modules/batch_extensions/fileegress/'
FILE_EGRESS_RESOURCES = {
FILE_EGRESS_PREFIX + 'batchfileuploader.py',
FILE_EGRESS_PREFIX + 'configuration.py',
FILE_EGRESS_PREFIX + 'requirements.txt',
FILE_EGRESS_PREFIX + 'setup_uploader.py',
FILE_EGRESS_PREFIX + 'uploader.py',
FILE_EGRESS_PREFIX + 'util.py',
FILE_EGRESS_PREFIX + 'uploadfiles.py'}
# These properties are reserved for application template use
# and may not be used on jobs using an application template
PROPS_RESERVED_FOR_TEMPLATES = {
'jobManagerTask',
'jobPreparationTask',
'jobReleaseTask',
#'commonEnvironmentSettings',
'usesTaskDependencies',
'onAllTasksComplete',
'onTaskFailure',
'taskFactory'}
PROPS_PERMITTED_ON_TEMPLATES = PROPS_RESERVED_FOR_TEMPLATES.union({
'templateMetadata',
'parameters',
'metadata'})
ATTRS_RESERVED_FOR_TEMPLATES = {
'job_manager_task',
'job_preparation_task',
'job_release_task',
#'common_environment_settings',
'uses_task_dependencies',
'on_all_tasks_complete',
'on_task_failure',
'task_factory'}
# These properties are reserved for job use
# and may not be used on an application template
PROPS_RESERVED_FOR_JOBS = {
'id',
'displayName',
'priority',
'constraints',
'poolInfo',
'applicationTemplateInfo'}
# Properties on a repeatTask object that should be
# applied to each expanded task.
PROPS_ON_REPEAT_TASK = {
'displayName',
'resourceFiles',
'environmentSettings',
'constraints',
'userIdentity',
'exitConditions',
'clientExtensions',
'outputFiles',
'packageReferences'}
PROPS_ON_COLLECTION_TASK = PROPS_ON_REPEAT_TASK.union({
'multiInstanceSettings',
'dependsOn'})
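Because these are ordinary Python sets, template validation is just set arithmetic. The helper below is a hypothetical illustration (not part of this module; the import path is assumed):

from azure.batch_extensions.models.constants import PROPS_RESERVED_FOR_TEMPLATES  # assumed path

def find_reserved_props(job_json):
    # Return any top-level job properties that only an application template may set.
    return sorted(set(job_json) & PROPS_RESERVED_FOR_TEMPLATES)

print(find_reserved_props({'id': 'render-001', 'poolInfo': {}, 'taskFactory': {}}))  # ['taskFactory']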

View file

@@ -1,163 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.batch.models import JobAddParameter
from .constants import ATTRS_RESERVED_FOR_TEMPLATES
class ExtendedJobParameter(JobAddParameter):
"""An Azure Batch job to add.
:param id: A string that uniquely identifies the job within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. It is
common to use a GUID for the id.
:type id: str
:param display_name: The display name for the job. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param priority: The priority of the job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. The default value is 0.
:type priority: int
:param constraints: The execution constraints for the job.
:type constraints: :class:`JobConstraints
<azure.batch.models.JobConstraints>`
:param job_manager_task: Details of a Job Manager task to be launched when
the job is started. If the job does not specify a Job Manager task, the
user must explicitly add tasks to the job. If the job does specify a Job
Manager task, the Batch service creates the Job Manager task when the job
is created, and will try to schedule the Job Manager task before
scheduling other tasks in the job. The Job Manager task's typical purpose
is to control and/or monitor job execution, for example by deciding what
additional tasks to run, determining when the work is complete, etc.
(However, a Job Manager task is not restricted to these activities - it is
a fully-fledged task in the system and can perform whatever actions are
required for the job.) For example, a Job Manager task might download a
file specified as a parameter, analyze the contents of that file and
submit additional tasks based on those contents.
:type job_manager_task: :class:`JobManagerTask
<azure.batch.models.JobManagerTask>`
:param job_preparation_task: The Job Preparation task. If a job has a Job
Preparation task, the Batch service will run the Job Preparation task on a
compute node before starting any tasks of that job on that compute node.
:type job_preparation_task: :class:`JobPreparationTask
<azure.batch.models.JobPreparationTask>`
:param job_release_task: The Job Release task. A Job Release task cannot
be specified without also specifying a Job Preparation task for the job.
The Batch service runs the Job Release task on the compute nodes that have
run the Job Preparation task. The primary purpose of the Job Release task
is to undo changes to compute nodes made by the Job Preparation task.
Example activities include deleting local files, or shutting down services
that were started as part of job preparation.
:type job_release_task: :class:`JobReleaseTask
<azure.batch.models.JobReleaseTask>`
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all tasks in
the job (including the Job Manager, Job Preparation and Job Release
tasks).
:type common_environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param pool_info: The pool on which the Batch service runs the job's
tasks.
:type pool_info: :class:`PoolInformation
<azure.batch.models.PoolInformation>`
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. Note that if a job
contains no tasks, then all tasks are considered complete. This option is
therefore most commonly used with a Job Manager task; if you want to use
automatic job termination without a Job Manager, you should initially set
onAllTasksComplete to noAction and update the job properties to set
onAllTasksComplete to terminateJob once you have finished adding tasks.
Permitted values are: noAction - do nothing. The job remains active unless
terminated or disabled by some other means. terminateJob - terminate the
job. The job's terminateReason is set to 'AllTasksComplete'. The default
is noAction. Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or :class:`OnAllTasksComplete
<azure.batch.models.OnAllTasksComplete>`
:param on_task_failure: The action the Batch service should take when any
task in the job fails. A task is considered to have failed if it completes
with a non-zero exit code and has exhausted its retry count, or if it had
a scheduling error. noAction - do nothing. performExitOptionsJobAction -
take the action associated with the task exit condition in the task's
exitConditions collection. (This may still result in no action being
taken, if that is what the task specifies.) The default is noAction.
Possible values include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or :class:`OnTaskFailure
<azure.batch.models.OnTaskFailure>`
:param metadata: A list of name-value pairs associated with the job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param uses_task_dependencies: The flag that determines if this job will
use tasks with dependencies.
:type uses_task_dependencies: bool
:param task_factory: A task factory reference to automatically generate a set of
tasks to be added to the job.
:type task_factory: :class:`TaskFactoryBase
<azure.batch_extensions.models.TaskFactoryBase>`
:param application_template_info: A reference to an application template file to
be expanded to complete the job specification. If supplied, the following arguments
cannot also be supplied or they will be overwritten: 'job_manager_task',
'common_environment_settings', 'uses_task_dependencies', 'on_all_tasks_complete',
'on_task_failure', 'task_factory', 'job_preparation_task', 'job_release_task'.
:type application_template_info: :class:`ApplicationTemplateInfo
<azure.batch_extensions.models.ApplicationTemplateInfo>`
"""
_validation = {
'id': {'required': True},
'pool_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'},
'application_template_info': {'key': 'applicationTemplateInfo', 'type': 'ApplicationTemplateInfo'}
}
def __init__(self, id, pool_info, display_name=None, priority=None, constraints=None, job_manager_task=None,
job_preparation_task=None, job_release_task=None, common_environment_settings=None,
on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies=None,
task_factory=None, application_template_info=None):
super(ExtendedJobParameter, self).__init__(
id=id,
display_name=display_name,
priority=priority,
constraints=constraints,
job_manager_task=job_manager_task,
job_preparation_task=job_preparation_task,
job_release_task=job_release_task,
common_environment_settings=common_environment_settings,
pool_info=pool_info,
on_all_tasks_complete=on_all_tasks_complete,
on_task_failure=on_task_failure,
metadata=metadata,
uses_task_dependencies=uses_task_dependencies)
self.task_factory = task_factory
self.application_template_info = application_template_info
if self.application_template_info:
# Rule: Jobs may not use properties reserved for template use
reserved = [k for k, v in self.__dict__.items() \
if k in ATTRS_RESERVED_FOR_TEMPLATES and v is not None]
if reserved:
raise ValueError("Jobs using application templates may not use these "
"properties: {}".format(', '.join(reserved)))

View file

@@ -1,34 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExtendedOutputFileDestination(Model):
"""The specification for where output files should be uploaded to on task
completion.
:param container: A location in Azure blob storage to which files are
uploaded. This cannot be combined with auto_storage.
:type container: :class:`OutputFileBlobContainerDestination
<azure.batch.models.OutputFileBlobContainerDestination>`
:param auto_storage: An auto-storage file group reference. This cannot be
combined with container.
:type auto_storage: :class:`OutputFileAutoStorageDestination
<azure.batch_extensions.models.OutputFileAutoStorageDestination>`
"""
_attribute_map = {
'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'},
'auto_storage': {'key': 'autoStorage', 'type': 'OutputFileAutoStorageDestination'},
}
def __init__(self, container=None, auto_storage=None):
if container and auto_storage:
raise ValueError("Cannot specify both container and auto_storage.")
self.container = container
self.auto_storage = auto_storage
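A brief sketch of the either/or rule (import path and the OutputFileAutoStorageDestination signature are assumptions):

from azure.batch_extensions.models import (  # assumed import path
    ExtendedOutputFileDestination, OutputFileAutoStorageDestination)

# Upload task output to an auto-storage file group.
dest = ExtendedOutputFileDestination(
    auto_storage=OutputFileAutoStorageDestination(file_group='render-output'))

# Supplying both container and auto_storage raises ValueError in the constructor.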

View file

@@ -1,209 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.batch.models import PoolAddParameter
class ExtendedPoolParameter(PoolAddParameter):
"""A pool in the Azure Batch service to add.
:param id: A string that uniquely identifies the pool within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
pool IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service returns an error; if you are calling the REST API
directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, at least one of targetDedicatedNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information. For more information about specifying this formula, see
'Automatically scale compute nodes in an Azure Batch pool'
(https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: :class:`NetworkConfiguration
<azure.batch.models.NetworkConfiguration>`
:param start_task: A task specified to run on each compute node as it
joins the pool. The task runs when the node is added to the pool or when
the node is restarted.
:type start_task: :class:`StartTask <azure.batch.models.StartTask>`
:param certificate_references: The list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references: list of :class:`CertificateReference
<azure.batch.models.CertificateReference>`
:param application_package_references: The list of application packages to
be installed on each compute node in the pool. This property is currently
not supported on pools created using the virtualMachineConfiguration
(IaaS) property.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list of str
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How the Batch service distributes tasks
between compute nodes in the pool.
:type task_scheduling_policy: :class:`TaskSchedulingPolicy
<azure.batch.models.TaskSchedulingPolicy>`
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list of :class:`UserAccount
<azure.batch.models.UserAccount>`
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azure.batch_extensions.models.PackageReferenceBase>`
"""
_validation = {
'id': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, id, vm_size, display_name=None, cloud_service_configuration=None, virtual_machine_configuration=None,
resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None, enable_auto_scale=None,
auto_scale_formula=None, auto_scale_evaluation_interval=None, enable_inter_node_communication=None, network_configuration=None,
start_task=None, certificate_references=None, application_package_references=None, application_licenses=None,
max_tasks_per_node=None, task_scheduling_policy=None, user_accounts=None, metadata=None, package_references=None):
super(ExtendedPoolParameter, self).__init__(
id=id,
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
user_accounts=user_accounts,
metadata=metadata)
self.package_references = package_references
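The only addition over the base PoolAddParameter is package_references; everything else is forwarded to the superclass. A hedged sketch of a Windows pool with a Chocolatey package pre-installed (import path, image and size values are illustrative assumptions):

from azure.batch.models import VirtualMachineConfiguration, ImageReference
from azure.batch_extensions import models  # assumed import path

pool = models.ExtendedPoolParameter(
    id='maya-render-pool',
    vm_size='STANDARD_D2_V2',
    target_dedicated_nodes=4,
    virtual_machine_configuration=VirtualMachineConfiguration(
        image_reference=ImageReference(publisher='MicrosoftWindowsServer',
                                       offer='WindowsServer',
                                       sku='2016-Datacenter'),
        node_agent_sku_id='batch.node.windows amd64'),
    package_references=[models.ChocolateyPackageReference(id='blender')])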

View file

@@ -1,204 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.batch.models import PoolSpecification
class ExtendedPoolSpecification(PoolSpecification):
"""Specification for creating a new pool.
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of the virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property must be specified if the pool needs to be created
with Azure PaaS VMs. This property and virtualMachineConfiguration are
mutually exclusive and one of the properties must be specified. If neither
is specified then the Batch service returns an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request). This
property cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property must be specified if the pool needs to be
created with Azure IaaS VMs. This property and cloudServiceConfiguration
are mutually exclusive and one of the properties must be specified. If
neither is specified then the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How tasks are distributed among compute
nodes in the pool.
:type task_scheduling_policy: :class:`TaskSchedulingPolicy
<azure.batch.models.TaskSchedulingPolicy>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service rejects the request with an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, the targetDedicated element is required. If
true, the autoScaleFormula element is required. The pool automatically
resizes according to the formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: The formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service rejects the request with an
invalid property value error; if you are calling the REST API directly,
the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: :class:`NetworkConfiguration
<azure.batch.models.NetworkConfiguration>`
:param start_task: A task to run on each compute node as it joins the
pool. The task runs when the node is added to the pool or when the node is
restarted.
:type start_task: :class:`StartTask <azure.batch.models.StartTask>`
:param certificate_references: A list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references: list of :class:`CertificateReference
<azure.batch.models.CertificateReference>`
:param application_package_references: The list of application packages to
be installed on each compute node in the pool. This property is currently
not supported on auto pools created with the virtualMachineConfiguration
(IaaS) property.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list of str
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list of :class:`UserAccount
<azure.batch.models.UserAccount>`
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azure.batch_extensions.models.PackageReferenceBase>`
"""
_validation = {
'vm_size': {'required': True},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, vm_size, display_name=None, cloud_service_configuration=None, virtual_machine_configuration=None,
max_tasks_per_node=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes=None,
target_low_priority_nodes=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None,
enable_inter_node_communication=None, network_configuration=None, start_task=None, certificate_references=None,
application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, package_references=None):
super(ExtendedPoolSpecification, self).__init__(
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
user_accounts=user_accounts,
metadata=metadata)
self.package_references = package_references
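ExtendedPoolSpecification mirrors ExtendedPoolParameter but carries no id, because it describes a pool the service will create on demand, for example as the pool of the auto pool specification shown earlier. A short, hedged sketch (import path assumed; a virtual_machine_configuration or cloud_service_configuration would also be supplied in practice):

from azure.batch_extensions import models  # assumed import path

spec = models.ExtendedPoolSpecification(
    vm_size='STANDARD_D2_V2',
    target_low_priority_nodes=10,
    package_references=[models.ChocolateyPackageReference(id='blender')])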

View file

@@ -1,49 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.batch.models import ResourceFile
class ExtendedResourceFile(ResourceFile):
"""A file to be downloaded from Azure blob storage to a compute node.
:param blob_source: The URL of the file within Azure Blob Storage. This
URL must be readable using anonymous access; that is, the Batch service
does not present any credentials when downloading the blob. There are two
ways to get such a URL for a blob in Azure storage: include a Shared
Access Signature (SAS) granting read permissions on the blob, or set the
ACL for the blob or its container to allow public access.
:type blob_source: str
:param file_path: The location on the compute node to which to download
the file, relative to the task's working directory. If using a file group
source that references more than one file, this will be considered the name
of a directory, otherwise it will be treated as the destination file name.
:type file_path: str
:param file_mode: The file permission mode attribute in octal format. This
property applies only to files being downloaded to Linux compute nodes. It
will be ignored if it is specified for a resourceFile which will be
downloaded to a Windows node. If this property is not specified for a
Linux node, then a default value of 0770 is applied to the file.
If using a file group source that references more than one file, this will be
applied to all files in the group.
:type file_mode: str
:param source: A file source reference which could include a collection of files from
an Azure Storage container or an auto-storage file group.
:type source: :class:`FileSource
<azure.batch_extensions.models.FileSource>`
"""
_attribute_map = {
'blob_source': {'key': 'blobSource', 'type': 'str'},
'file_path': {'key': 'filePath', 'type': 'str'},
'file_mode': {'key': 'fileMode', 'type': 'str'},
'source': {'key': 'source', 'type': 'FileSource'}
}
def __init__(self, blob_source=None, file_path=None, file_mode=None, source=None):
super(ExtendedResourceFile, self).__init__(blob_source, file_path, file_mode)
self.source = source
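The extension point is the source parameter, which lets one resource file entry pull in a whole file group or container rather than a single blob URL. A hedged sketch (import path assumed):

from azure.batch_extensions.models import ExtendedResourceFile, FileSource  # assumed import path

# Download every file in the 'scene-assets' file group into an 'assets'
# subdirectory of the task working directory.
assets = ExtendedResourceFile(
    source=FileSource(file_group='scene-assets'),
    file_path='assets')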

View file

@@ -1,145 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.batch.models import TaskAddParameter
class ExtendedTaskParameter(TaskAddParameter):
"""An Azure Batch task to add.
:param id: A string that uniquely identifies the task within the job. The
ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
IDs within a job that differ only by case).
:type id: str
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ResourceFile
<azure.batch.models.ResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param multi_instance_settings: An object that indicates that the task is
a multi-instance task, and contains information about how to run the
multi-instance task.
:type multi_instance_settings: :class:`MultiInstanceSettings
<azure.batch.models.MultiInstanceSettings>`
:param depends_on: The tasks that this task depends on. This task will not
be scheduled until all tasks that it depends on have completed
successfully. If any of those tasks fail and exhaust their retry counts,
this task will never be scheduled. If the job does not have
usesTaskDependencies set to true, and this element is present, the request
fails with error code TaskDependenciesNotSpecifiedOnJob.
:type depends_on: :class:`TaskDependencies
<azure.batch.models.TaskDependencies>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of files that the Batch service will upload
from the compute node after running the command line. For multi-instance
tasks, the files will only be uploaded from the compute node on which the
primary task is executed.
:type output_files: list of :class:`OutputFile
<azure.batch_extensions.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azure.batch_extensions.models.PackageReferenceBase>`
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, id, command_line, display_name=None, exit_conditions=None, resource_files=None, output_files=None,
environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None,
depends_on=None, application_package_references=None, authentication_token_settings=None, package_references=None):
super(ExtendedTaskParameter, self).__init__(
id=id,
display_name=display_name,
command_line=command_line,
exit_conditions=exit_conditions,
resource_files=resource_files,
output_files=output_files,
environment_settings=environment_settings,
affinity_info=affinity_info,
constraints=constraints,
user_identity=user_identity,
multi_instance_settings=multi_instance_settings,
depends_on=depends_on,
application_package_references=application_package_references,
authentication_token_settings=authentication_token_settings)
self.package_references = package_references
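As with the pool models, the task model only layers package_references on top of the stock TaskAddParameter. A minimal, hedged sketch (import path and command line are illustrative):

from azure.batch_extensions import models  # assumed import path

task = models.ExtendedTaskParameter(
    id='frame-001',
    command_line='cmd /c render.exe /frame 1',
    resource_files=[models.ExtendedResourceFile(
        source=models.FileSource(file_group='scene-assets'))],
    package_references=[models.ChocolateyPackageReference(id='imagemagick')])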

View file

@@ -1,42 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class FileCollectionTaskFactory(TaskFactoryBase):
"""A Task Factory for generating a set of tasks based on the contents
of an Azure Storage container or auto-storage file group. One task
will be generated per input file, and automatically added to the job.
:param source: The input file source from which the tasks will be generated.
:type source: :class:`FileSource <azure.batch_extensions.models.FileSource>`
:param repeat_task: The task template that will be used to generate each task.
:type repeat_task: :class:`RepeatTask <azure.batch_extensions.models.RepeatTask>`
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azure.batch_extensions.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
'source': {'required': True},
'repeat_task': {'required': True}
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'source': {'key': 'source', 'type': 'FileSource'},
'repeat_task': {'key': 'repeatTask', 'type': 'RepeatTask'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, source, repeat_task, merge_task=None):
super(FileCollectionTaskFactory, self).__init__(merge_task)
self.source = source
self.repeat_task = repeat_task
self.type = 'taskPerFile'
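Combined with the FileSource model defined below, the factory fans a job out into one task per matching input file. A hedged sketch (import path assumed; RepeatTask is assumed to accept a command_line like an ordinary task, with per-file placeholders expanded by the factory):

from azure.batch_extensions import models  # assumed import path

factory = models.FileCollectionTaskFactory(
    source=models.FileSource(file_group='scene-assets', prefix='shot01/'),
    repeat_task=models.RepeatTask(command_line='render.exe {fileName}'))  # placeholder name is illustrative
print(factory.type)  # 'taskPerFile'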

View file

@@ -1,33 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileSource(Model):
"""A source of input files to be downloaded onto a compute node.
:param str file_group: The name of an auto-storage file group.
:param str url: The URL of a file to be downloaded.
:param str container_url: The SAS URL of an Azure Storage container.
:param str prefix: The filename prefix or subdirectory of input files
in either an auto-storage file group or container. Will be ignored if
combined with url.
"""
_attribute_map = {
'file_group': {'key': 'fileGroup', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'container_url': {'key': 'containerUrl', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
}
def __init__(self, file_group=None, url=None, container_url=None, prefix=None):
self.file_group = file_group
self.url = url
self.container_url = container_url
self.prefix = prefix
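FileSource is a simple bag of alternatives: exactly one of file_group, url, or container_url identifies the input location, and prefix narrows the selection within a file group or container. A small sketch (import path assumed; the SAS URL is a placeholder):

from azure.batch_extensions.models import FileSource  # assumed import path

group_source = FileSource(file_group='scene-assets', prefix='textures/')
container_source = FileSource(container_url='<container SAS URL>')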

View file

@@ -1,140 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobManagerTask(Model):
"""Specifies details of a Job Manager task.
:param id: A string that uniquely identifies the Job Manager task within
the job. The id can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters.
:type id: str
:param display_name: The display name of the Job Manager task. It need not
be unique and can contain any Unicode characters up to a maximum length of
1024.
:type display_name: str
:param command_line: The command line of the Job Manager task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param output_files: A list of files that the Batch service will upload
from the compute node after running the command line.
:type output_files: list of :class:`OutputFile
<azure.batch.models.OutputFile>`
:param environment_settings: A list of environment variable settings for
the Job Manager task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param constraints: Constraints that apply to the Job Manager task.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param kill_job_on_completion: Whether completion of the Job Manager task
signifies completion of the entire job. If true, when the Job Manager task
completes, the Batch service marks the job as complete. If any tasks are
still running at this time (other than Job Release), those tasks are
terminated. If false, the completion of the Job Manager task does not
affect the job status. In this case, you should either use the
onAllTasksComplete attribute to terminate the job, or have a client or
user terminate the job explicitly. An example of this is if the Job
Manager creates a set of tasks but then takes no further role in their
execution. The default value is true. If you are using the
onAllTasksComplete and onTaskFailure attributes to control job lifetime,
and using the Job Manager task only to create the tasks for the job (not
to monitor progress), then it is important to set killJobOnCompletion to
false.
:type kill_job_on_completion: bool
:param user_identity: The user identity under which the Job Manager task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param run_exclusive: Whether the Job Manager task requires exclusive use
of the compute node where it runs. If true, no other tasks will run on the
same compute node for as long as the Job Manager is running. If false,
other tasks can run simultaneously with the Job Manager on a compute node.
The Job Manager task counts normally against the node's concurrent task
limit, so this is only relevant if the node allows multiple concurrent
tasks. The default value is true.
:type run_exclusive: bool
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line. Application packages are downloaded and deployed to a shared
directory, not the task directory. Therefore, if a referenced package is
already on the compute node, and is up to date, then it is not
re-downloaded; the existing copy on the compute node is used. If a
referenced application package cannot be installed, for example because
the package has been deleted or because download failed, the task fails
with a scheduling error. This property is currently not supported on jobs
running on pools created using the virtualMachineConfiguration (IaaS)
property. If a task specifying applicationPackageReferences runs on such a
pool, it fails with a scheduling error with code
TaskSchedulingConstraintFailed.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param allow_low_priority_node: Whether the Job Manager task may run on a
low-priority compute node. The default value is false.
:type allow_low_priority_node: bool
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'run_exclusive': {'key': 'runExclusive', 'type': 'bool'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'},
}
def __init__(self, id, command_line, display_name=None, resource_files=None, output_files=None,
environment_settings=None, constraints=None, kill_job_on_completion=None, user_identity=None,
run_exclusive=None, application_package_references=None, authentication_token_settings=None,
allow_low_priority_node=None):
self.id = id
self.display_name = display_name
self.command_line = command_line
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.constraints = constraints
self.kill_job_on_completion = kill_job_on_completion
self.user_identity = user_identity
self.run_exclusive = run_exclusive
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.allow_low_priority_node = allow_low_priority_node
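A compressed example of the killJobOnCompletion trade-off described above: a manager that only submits work and then exits should not end the job itself. Hedged sketch (import path and script name are illustrative):

from azure.batch_extensions.models import JobManagerTask  # assumed import path

manager = JobManagerTask(
    id='job-manager',
    command_line='python submit_tasks.py',
    kill_job_on_completion=False,   # let onAllTasksComplete govern job lifetime
    run_exclusive=False)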

View file

@@ -1,96 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobPreparationTask(Model):
"""A Job Preparation task to run before any tasks of the job on any given
compute node.
:param id: A string that uniquely identifies the Job Preparation task
within the job. The ID can contain any combination of alphanumeric
characters including hyphens and underscores and cannot contain more than
64 characters. If you do not specify this property, the Batch service
assigns a default value of 'jobpreparation'. No other task in the job can
have the same id as the Job Preparation task. If you try to submit a task
with the same id, the Batch service rejects the request with error code
TaskIdSameAsJobPreparationTask; if you are calling the REST API directly,
the HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Preparation task. The
command line does not run under a shell, and therefore cannot take
advantage of shell features such as environment variable expansion. If you
want to take advantage of such features, you should invoke the shell in
the command line, for example using "cmd /c MyCommand" in Windows or
"/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the Job Preparation task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param constraints: Constraints that apply to the Job Preparation task.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param wait_for_success: Whether the Batch service should wait for the Job
Preparation task to complete successfully before scheduling any other
tasks of the job on the compute node. If true and the Job Preparation task
fails on a compute node, the Batch service retries the Job Preparation
task up to its maximum retry count (as specified in the constraints
element). If the task has still not completed successfully after all
retries, then the Batch service will not schedule tasks of the job to the
compute node. The compute node remains active and eligible to run tasks of
other jobs. If false, the Batch service will not wait for the Job
Preparation task to complete. In this case, other tasks of the job can
start executing on the compute node while the Job Preparation task is
still running; and even if the Job Preparation task fails, new tasks will
continue to be scheduled on the node. The default value is true.
:type wait_for_success: bool
:param user_identity: The user identity under which the Job Preparation
task runs. If omitted, the task runs as a non-administrative user unique
to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param rerun_on_node_reboot_after_success: Whether the Batch service
should rerun the Job Preparation task after a compute node reboots. The
Job Preparation task is always rerun if a compute node is reimaged, or if
the Job Preparation task did not complete (e.g. because the reboot
occurred while the task was running). Therefore, you should always write a
Job Preparation task to be idempotent and to behave correctly if run
multiple times. The default value is true.
:type rerun_on_node_reboot_after_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'},
}
def __init__(self, command_line, id=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success=None, user_identity=None, rerun_on_node_reboot_after_success=None):
self.id = id
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.constraints = constraints
self.wait_for_success = wait_for_success
self.user_identity = user_identity
self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success
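# Illustrative sketch (not part of the original file): a minimal Job
# Preparation task that blocks job tasks on a node until it succeeds.
# The command line is a placeholder assumption.
prep_task = JobPreparationTask(
    command_line='/bin/sh -c "echo preparing node"',
    wait_for_success=True,
    rerun_on_node_reboot_after_success=True)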

Просмотреть файл

@ -1,83 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobReleaseTask(Model):
"""A Job Release task to run on job completion on any compute node where the
job has run.
:param id: A string that uniquely identifies the Job Release task within
the job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters. If you do not specify this property, the Batch service assigns
a default value of 'jobrelease'. No other task in the job can have the
same id as the Job Release task. If you try to submit a task with the same
id, the Batch service rejects the request with error code
TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the
HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Release task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the Job Release task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param max_wall_clock_time: The maximum elapsed time that the Job Release
task may run on a given compute node, measured from the time the task
starts. If the task does not complete within the time limit, the Batch
service terminates it. The default value is 15 minutes. You may not
specify a timeout longer than 15 minutes. If you do, the Batch service
rejects it with an error; if you are calling the REST API directly, the
HTTP status code is 400 (Bad Request).
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the task directory for
the Job Release task on the compute node. After this time, the Batch
service may delete the task directory and all its contents. The default is
infinite, i.e. the task directory will be retained until the compute node
is removed or reimaged.
:type retention_time: timedelta
:param user_identity: The user identity under which the Job Release task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, command_line, id=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None):
self.id = id
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.user_identity = user_identity
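# Illustrative sketch (not part of the original file): a Job Release task that
# cleans up per-node scratch data once the job completes. The command line and
# timeout values are placeholder assumptions.
import datetime
release_task = JobReleaseTask(
    command_line='/bin/sh -c "rm -rf /tmp/job-scratch"',
    max_wall_clock_time=datetime.timedelta(minutes=10))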

Просмотреть файл

@ -1,133 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MergeTask(Model):
"""An Azure Batch task template to repeat.
:param str id: The ID of the merge task.
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param depends_on: The tasks that this task depends on. This task will not
be scheduled until all tasks that it depends on have completed
successfully. If any of those tasks fail and exhaust their retry counts,
this task will never be scheduled. If the job does not have
usesTaskDependencies set to true, and this element is present, the request
fails with error code TaskDependenciesNotSpecifiedOnJob.
:type depends_on: :class:`TaskDependencies
<azure.batch.models.TaskDependencies>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of output file references to be persisted once
the task has completed.
:type output_files: list of :class:`OutputFile
<azure.batch_extensions.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azure.batch_extensions.models.PackageReferenceBase>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'},
}
def __init__(self, command_line, id=None, display_name=None, exit_conditions=None, resource_files=None, environment_settings=None,
affinity_info=None, constraints=None, user_identity=None, depends_on=None, application_package_references=None,
authentication_token_settings=None, output_files=None, package_references=None):
self.id = id
self.display_name = display_name
self.command_line = command_line
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references

Просмотреть файл

@ -1,51 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OutputFile(Model):
"""A specification for uploading files from an Azure Batch node to another
location after the Batch service has finished executing the task process.
:param file_pattern: A pattern indicating which file(s) to upload. Both
relative and absolute paths are supported. Relative paths are relative to
the task working directory. For wildcards, use * to match any character
and ** to match any directory. For example, **\\*.txt matches any file
ending in .txt in the task working directory or any subdirectory. Note
that \\ and / are treated interchangeably and mapped to the correct
directory separator on the compute node operating system.
:type file_pattern: str
:param destination: The destination for the output file(s).
:type destination: :class:`ExtendedOutputFileDestination
<azure.batch_extensions.models.ExtendedOutputFileDestination>`
:param upload_options: Additional options for the upload operation,
including under what conditions to perform the upload.
:type upload_options: :class:`OutputFileUploadOptions
<azure.batch.models.OutputFileUploadOptions>`
"""
_validation = {
'file_pattern': {'required': True},
'destination': {'required': True},
'upload_options': {'required': True},
}
_attribute_map = {
'file_pattern': {'key': 'filePattern', 'type': 'str'},
'destination': {'key': 'destination', 'type': 'ExtendedOutputFileDestination'},
'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'},
}
def __init__(self, file_pattern, destination, upload_options):
self.file_pattern = file_pattern
self.destination = destination
self.upload_options = upload_options

Просмотреть файл

@ -1,34 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OutputFileAutoStorageDestination(Model):
"""An speficition of output files upload destination that uses an
auto-storage file group.
:param str file_group: The name of the file group that the output files will
be uploaded to.
:param str path: The destination path within the file group that the files will
be uploaded to. If the output file specification refers to a single file, this will
be treated as a file name. If the output file specification refers to potentially
multiple files, this will be treated as a subfolder.
"""
_validation = {
'file_group': {'required': True}
}
_attribute_map = {
'file_group': {'key': 'fileGroup', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, file_group, path=None):
self.file_group = file_group
self.path = path
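# Illustrative sketch (not part of the original file): persist all .exr outputs
# of a task to an auto-storage file group on success. The constructor arguments
# of ExtendedOutputFileDestination and OutputFileUploadOptions are assumptions
# based on the related models and may differ.
from azure.batch.models import OutputFileUploadOptions
exr_output = OutputFile(
    file_pattern='**/*.exr',
    destination=ExtendedOutputFileDestination(
        auto_storage=OutputFileAutoStorageDestination(
            file_group='rendered-frames', path='job-01')),
    upload_options=OutputFileUploadOptions(upload_condition='taskSuccess'))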

Просмотреть файл

@ -1,40 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PackageReferenceBase(Model):
"""A reference to a package to be installed on the compute nodes using
a package manager.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
_subtype_map = {
'type': {'aptPackage': 'AptPackageReference',
'chocolateyPackage': 'ChocolateyPackageReference',
'yumPackage': 'YumPackageReference'}
}
def __init__(self, id, version=None):
self.type = None
self.id = id
self.version = version

Просмотреть файл

@ -1,46 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ParameterSet(Model):
"""A set of parametric sweep range range parameters.
:param int start: The starting value of the sweep.
:param int end: The ending value of the sweep (inclusive).
:param int step: The incremental step value, default is 1. The step value
can be negative (i.e. a descending sweep), but only if the start value is
higher than the end value.
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
'step': {'key': 'step', 'type': 'int'},
}
def __init__(self, start, end, step=1):
try:
self.start = int(start)
self.end = int(end)
self.step = int(step)
except (TypeError, ValueError):
raise ValueError("'start', 'end' and 'step' parameters must be integers.")
if step == 0:
raise ValueError("'step' parameter cannot be 0.")
elif start > end and step > 0:
raise ValueError(
"'step' must be a negative number when 'start' is greater than 'end'")
elif start < end and step < 0:
raise ValueError(
"'step' must be a positive number when 'end' is greater than 'start'")

Просмотреть файл

@ -1,45 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class ParametricSweepTaskFactory(TaskFactoryBase):
"""A Task Factory for generating a set of tasks based on one or more parameter
sets to define a numeric input range. Each parameter set will have a start, end
and step value. A task will be generated for each integer in this range. Multiple
parameter sets can be combined for a multi-dimensional sweep.
:param parameter_sets: A list of parameter sets from which tasks will be generated.
:type parameter_sets: A list of :class:`ParameterSet<azure.batch_extensions.models.ParameterSet>`
:param repeat_task: The task template that will be used to generate each task.
:type repeat_task: :class:`RepeatTask <azure.batch_extensions.models.RepeatTask>`
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azure.batch_extensions.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
'parameter_sets': {'required': True, 'min_items': 1},
'repeat_task': {'required': True}
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'parameter_sets': {'key': 'parameterSets', 'type': '[ParameterSet]'},
'repeat_task': {'key': 'repeatTask', 'type': 'RepeatTask'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, parameter_sets, repeat_task, merge_task=None):
super(ParametricSweepTaskFactory, self).__init__(merge_task)
if not parameter_sets:
raise ValueError("Parametric Sweep task factory requires at least one parameter set.")
self.parameter_sets = parameter_sets
self.repeat_task = repeat_task
self.type = 'parametricSweep'
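# Illustrative sketch (not part of the original file): one task per frame from
# 1 to 100, plus an optional merge step. The '{0}' placeholder for the sweep
# value is an assumption about how the repeat task template is expanded.
sweep_factory = ParametricSweepTaskFactory(
    parameter_sets=[ParameterSet(start=1, end=100)],
    repeat_task=RepeatTask(command_line='render -frame {0} scene.mb'),
    merge_task=MergeTask(id='merge', command_line='compile_video.sh frames'))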

Просмотреть файл

@ -1,120 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RepeatTask(Model):
"""An Azure Batch task template to repeat.
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of output file references to be persisted once
the task has completed.
:type output_files: list of :class:`OutputFile
<azure.batch_extensions.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azure.batch_extensions.models.PackageReferenceBase>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, command_line, display_name=None, exit_conditions=None, resource_files=None, environment_settings=None,
affinity_info=None, constraints=None, user_identity=None, application_package_references=None,
authentication_token_settings=None, output_files=None, package_references=None):
self.display_name = display_name
self.command_line = command_line
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references

Просмотреть файл

@ -1,77 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line.
:type resource_files: list of :class:`ExtendedResourceFile
<azure.batch_extensions.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and scheduling error detail. If false,
the Batch service will not wait for the start task to complete. In this
case, other tasks can start executing on the compute node while the start
task is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
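# Illustrative sketch (not part of the original file): a start task that blocks
# scheduling on a node until setup succeeds. The command line is a placeholder.
node_setup = StartTask(
    command_line='/bin/sh -c "apt-get -y update"',
    wait_for_success=True,
    max_task_retry_count=3)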

Просмотреть файл

@ -1,33 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class TaskCollectionTaskFactory(TaskFactoryBase):
"""A Task Factory for adding a predefined collection of tasks automatically
to a job on submission.
:param tasks: A list of task parameters, each of which will be added directly to the job.
:type tasks: A list of :class:`ExtendedTaskParameter
<azure.batch_extensions.models.ExtendedTaskParameter>`
"""
_validation = {
'type': {'required': True},
'tasks': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'tasks': {'key': 'tasks', 'type': '[ExtendedTaskParameter]'},
}
def __init__(self, tasks):
super(TaskCollectionTaskFactory, self).__init__()
self.tasks = tasks
self.type = 'taskCollection'
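# Illustrative sketch (not part of the original file): a fixed collection of two
# tasks, assuming ExtendedTaskParameter accepts the same id/command_line
# arguments as the azure.batch TaskAddParameter it extends.
collection_factory = TaskCollectionTaskFactory(tasks=[
    ExtendedTaskParameter(id='frame-1', command_line='render -frame 1 scene.mb'),
    ExtendedTaskParameter(id='frame-2', command_line='render -frame 2 scene.mb')])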

Просмотреть файл

@ -1,37 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskFactoryBase(Model):
"""A Task Factory for automatically adding a collection of tasks to a job on
submission.
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azure.batch_extensions.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
_subtype_map = {
'type': {'parametricSweep': 'ParametricSweepTaskFactory',
'taskPerFile': 'FileCollectionTaskFactory',
'taskCollection': 'TaskCollectionTaskFactory'}
}
def __init__(self, merge_task=None):
self.merge_task = merge_task
self.type = None

Просмотреть файл

@ -1,37 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .package_reference_base import PackageReferenceBase
class YumPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the YUM package
manager on a Linux node.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
:param bool disable_excludes: Whether to allow packages that might otherwise
be excluded by VM configuration (e.g. kernel packages). Default is False.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'disable_excludes': {'key': 'disableExcludes', 'type': 'bool'}
}
def __init__(self, id, version=None, disable_excludes=None):
super(YumPackageReference, self).__init__(id=id, version=version)
self.disable_excludes = disable_excludes
self.type = 'yumPackage'
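# Illustrative sketch (not part of the original file): pin a package version to
# be installed via YUM on Linux pool nodes; the name and version are placeholders.
ffmpeg_ref = YumPackageReference(id='ffmpeg', version='3.3')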

Просмотреть файл

@ -1,20 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .pool_operations import ExtendedPoolOperations
from .job_operations import ExtendedJobOperations
from .file_operations import ExtendedFileOperations
__all__ = [
'ExtendedPoolOperations',
'ExtendedJobOperations',
'ExtendedFileOperations',
]

Просмотреть файл

@ -1,140 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
import errno  # used by the EEXIST guard in download()
import os
from .. import models
from .. import _file_utils as file_utils
from azure.batch.operations.file_operations import FileOperations
class ExtendedFileOperations(FileOperations):
"""FileOperations operations.
:param parent: The parent BatchExtensionsClient object.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:param get_storage_account: A callable to retrieve a storage client object.
"""
def __init__(self, parent, client, config, serializer, deserializer, get_storage_account):
super(ExtendedFileOperations, self).__init__(client, config, serializer, deserializer)
self._parent = parent
self.get_storage_client = get_storage_account
def generate_sas_url(self, file_group, file_name, remote_path=None):
"""Generate a SAS URL for a specific file in an exiting file group.
:param str file_group: The file group into the file was uploaded.
:param str file_name: The name of the file to generate the URL for.
:param str remote_path: The subfolder in the file group under which the file
was uploaded.
:returns: The URL (str).
"""
container = file_utils._get_container_name(file_group)
storage_client = self.get_storage_client()
if remote_path:
# Add any specified virtual directories
blob_prefix = remote_path.strip('\\/')
file_name = '{}/{}'.format(blob_prefix, file_name.strip('\\/'))
try:
blob = storage_client.get_blob_properties(container, file_name)
except Exception as exp: # TODO: Catch specific error.
raise ValueError("Unable to locate blob '{}' in container '{}'. Error: {}".format(
file_name, container, exp))
else:
return file_utils._generate_blob_sas_token(blob, container, storage_client)
def upload(self, local_path, file_group, remote_path=None, flatten=None, progress_callback=None):
"""Upload local file or directory of files to storage
:param str local_path: The full path to the local file or directory of files.
Also supports * and ** notation.
:param str file_group: The name of the file group under which to upload the files.
:param str remote_path: A subfolder path to upload the files to.
:param bool flatten: Whether to flatten the local directory structure when uploading.
The default is False, where the local directory structure will be maintained.
:param func progress_callback: A callback function to monitor progress of an individual
file upload. Must take two parameters, the data uploaded so far (int) and the total
data to be uploaded (int), both in bytes.
"""
path, files = file_utils.resolve_file_paths(local_path)
if len(files) > 0:
for f in files: # TODO: Threaded pool.
file_name = os.path.relpath(f, path)
file_utils.upload_blob(f, file_group, file_name, self.get_storage_client(),
remote_path=remote_path, flatten=flatten, progress_callback=progress_callback)
else:
raise ValueError('No files or directories found matching local path {}'.format(local_path))
def download(self, local_path, file_group, remote_path=None,
overwrite=False, progress_callback=None):
"""Download the contents of a file group, optionally relative to a subfolder.
:param str local_path: The directory into which the files will be downloaded. If
the files have a remote folder structure, this will be maintained relative to this
directory.
:param str file_group: The file group from which to download files.
:param str remote_path: The subfolder from which to download files.
:param bool overwrite: Whether to overwrite files if they already exist at the local
path specified.
:param func progress_callback: A function to monitor progress of the download of an
individual file. Must take two parameters, the data so far retrieved (int) and the
total data to be retrieved (int), both in bytes.
"""
storage_client = self.get_storage_client()
if remote_path and not remote_path.endswith('/'):
remote_path += '/'
files = file_utils.resolve_remote_paths(storage_client, file_group, remote_path)
if len(files) > 0:
for f in files:
file_name = os.path.realpath(\
os.path.join(local_path, f.name[len(remote_path):] if remote_path else f.name))
if not os.path.exists(file_name) or overwrite:
if not os.path.exists(os.path.dirname(file_name)):
try:
os.makedirs(os.path.dirname(file_name))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
file_utils.download_blob(f.name, file_group, file_name,
storage_client, progress_callback)
else:
raise ValueError('No files found in file group {} matching remote path {}'.format(
file_group, remote_path))
def list_groups(self, num_results=None, include_metadata=False):
"""List the file group names in the storage account."""
storage_client = self.get_storage_client()
prefix = file_utils.FileUtils.GROUP_PREFIX
return storage_client.list_containers(prefix=prefix, num_results=num_results, include_metadata=include_metadata)
def list_from_group(self, file_group, remote_path=None, num_results=None):
"""List the files in the file group."""
storage_client = self.get_storage_client()
container = file_utils._get_container_name(file_group)
return storage_client.list_blobs(container, prefix=remote_path, num_results=num_results)
def delete_group(self, file_group):
"""Attempt to delete the file group and all of it's contents.
Will do nothing if the group does not exist.
"""
storage_client = self.get_storage_client()
container = file_utils._get_container_name(file_group)
return storage_client.delete_container(container, fail_not_exist=False)
def delete_from_group(self, file_group, remote_path):
"""Delete one of more files from within a group."""
storage_client = self.get_storage_client()
container = file_utils._get_container_name(file_group)
blobs_to_delete = self.list_from_group(file_group)
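# Illustrative usage (not part of the original file), assuming `client` is an
# authenticated BatchExtensionsClient whose `file` attribute is this
# ExtendedFileOperations instance; paths and group names are placeholders.
client.file.upload(local_path='/scenes/*.mb', file_group='maya-assets')
sas_url = client.file.generate_sas_url('maya-assets', 'scene.mb')
client.file.download(local_path='/renders', file_group='maya-output', overwrite=True)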

Просмотреть файл

@ -1,184 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
import json
from msrest.exceptions import DeserializationError
from .. import models
from .. import _template_utils as templates
from .. import _job_utils as job_utils
from .. import _pool_utils as pool_utils
from .._file_utils import FileUtils
from azure.batch.operations.job_operations import JobOperations
class ExtendedJobOperations(JobOperations):
"""JobOperations operations.
:param parent: The parent BatchExtensionsClient object.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:param get_storage_account: A callable to retrieve a storage client object.
"""
def __init__(self, parent, client, config, serializer, deserializer, get_storage_account):
super(ExtendedJobOperations, self).__init__(client, config, serializer, deserializer)
self._parent = parent
self.get_storage_client = get_storage_account
def _load_template_file(self, json_file):
"""Load the contents of a JSON file as a dict.
:param str json_file: The path to the JSON file or a
file-like object.
"""
try:
try:
template_json = json.load(json_file)
except AttributeError: # Not a readable source.
with open(json_file, 'r') as template:
template_json = json.load(template)
except (EnvironmentError, ValueError) as error:
raise ValueError("Invalid JSON file: {}".format(error))
else:
return template_json
def expand_template(self, template, parameters=None):
"""Expand a JSON template, substituting in optional parameters.
:param template: The template data. Can either be a dictionary,
or a path to a JSON-formatted file, or a file-like readable object.
:param parameters: The values of parameters to be substituted into
the template. Can either be a dictionary, a path to a JSON-formatted file,
or a file-like readable object.
:returns: The pool specification JSON dictionary.
"""
if not isinstance(template, dict):
template = self._load_template_file(template)
if parameters and not isinstance(parameters, dict):
parameters = self._load_template_file(parameters)
elif not parameters:
parameters = {}
expanded_job_object = templates.expand_template(template, parameters)
try:
return expanded_job_object['job']['properties']
except KeyError as err:
raise ValueError("Template missing required element: {}".format(
err.args[0]))
def jobparameter_from_json(self, json_data):
"""Create an ExtendedJobParameter object from a JSON specification.
:param dict json_data: The JSON specification of an AddJobParameter or an
ExtendedJobParameter.
"""
try:
job = self._deserialize('ExtendedJobParameter', json_data)
if job is None:
raise ValueError("JSON file is not in correct format.")
return job
except Exception as exp:
raise ValueError("Unable to deserialize to ExtendedJobParameter: {}".format(exp))
def add(self, job, job_add_options=None, custom_headers=None, raw=False, threads=None, **operation_config):
"""Adds a job to the specified account.
The Batch service supports two ways to control the work done as part of
a job. In the first approach, the user specifies a Job Manager task.
The Batch service launches this task when it is ready to start the job.
The Job Manager task controls all other tasks that run under this job,
by using the Task APIs. In the second approach, the user directly
controls the execution of tasks under an active job, by using the Task
APIs. Also note: when naming jobs, avoid including sensitive
information such as user names or secret project names. This
information may appear in telemetry logs accessible to Microsoft
Support engineers.
:param job: The job to be added.
:type job: :class:`JobAddParameter<azure.batch.models.JobAddParameter>` or
:class:`ExtendedJobParameter<azure.batch_extensions.models.ExtendedJobParameter>`
:param job_add_options: Additional parameters for the operation
:type job_add_options: :class:`JobAddOptions
<azure.batch.models.JobAddOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
# Process an application template reference.
if hasattr(job, 'application_template_info') and job.application_template_info:
try:
templates.expand_application_template(job, self._deserialize)
except DeserializationError as error:
raise ValueError("Failed to load application template from '{}': {}".
format(job.application_template_info.file_path, error))
# Process a task factory.
auto_complete = False
task_collection = []
file_utils = FileUtils(self.get_storage_client)
if hasattr(job, 'task_factory') and job.task_factory:
task_collection = templates.expand_task_factory(job, file_utils)
# If job has a task factory and terminate job on all tasks complete is set, the job will
# already be terminated when we add the tasks, so we need to set to noAction, then patch
# the job once the tasks have been submitted.
if job.on_all_tasks_complete and job.on_all_tasks_complete != 'noAction':
auto_complete = job.on_all_tasks_complete
job.on_all_tasks_complete = 'noaction'
should_get_pool = templates.should_get_pool(job, task_collection)
pool_os_flavor = None
if should_get_pool:
pool = job_utils.get_target_pool(self._parent.pool, job)
pool_os_flavor = pool_utils.get_pool_target_os_type(pool)
# Handle package management on autopool
if job.pool_info.auto_pool_specification \
and job.pool_info.auto_pool_specification.pool \
and job.pool_info.auto_pool_specification.pool.package_references:
pool = job.pool_info.auto_pool_specification.pool
cmds = [templates.process_pool_package_references(pool)]
pool_os_flavor = pool_utils.get_pool_target_os_type(pool)
pool.start_task = models.StartTask(
**templates.construct_setup_task(pool.start_task, cmds, pool_os_flavor))
commands = []
# Handle package management on tasks.
commands.append(templates.process_task_package_references(
task_collection, pool_os_flavor))
job.job_preparation_task = models.JobPreparationTask(
**templates.construct_setup_task(job.job_preparation_task,
commands, pool_os_flavor))
# Handle any extended resource file references.
templates.post_processing(job, file_utils, pool_os_flavor)
if task_collection:
templates.post_processing(task_collection, file_utils, pool_os_flavor)
templates.process_job_for_output_files(job, task_collection, pool_os_flavor, file_utils)
# Begin original job add process
result = super(ExtendedJobOperations, self).add(job, job_add_options, custom_headers, raw, **operation_config)
if task_collection:
job_utils.deploy_tasks(self._parent.task, job.id, task_collection, threads)
if auto_complete:
# If the option to terminate the job was set, we need to reapply it with a patch
# now that the tasks have been added.
self.patch(job.id, {'on_all_tasks_complete': auto_complete})
return result
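# Illustrative usage (not part of the original file) of the template workflow
# above, assuming `client` is a BatchExtensionsClient; file names are placeholders.
job_spec = client.job.expand_template('job.template.json', 'job.parameters.json')
job = client.job.jobparameter_from_json(job_spec)
client.job.add(job, threads=4)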

Просмотреть файл

@ -1,128 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
import json
from .. import models
from .. import _file_utils as file_utils
from .. import _pool_utils as pool_utils
from .. import _template_utils as templates
from azure.batch.operations.pool_operations import PoolOperations
class ExtendedPoolOperations(PoolOperations):
"""PoolOperations operations.
:param parent: The parent BatchExtensionsClient object.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:param get_storage_account: A callable to retrieve a storage client object.
"""
def __init__(self, parent, client, config, serializer, deserializer, get_storage_account):
super(ExtendedPoolOperations, self).__init__(client, config, serializer, deserializer)
self._parent = parent
self.get_storage_client = get_storage_account
def _load_template_file(self, json_file):
"""Load the contents of a JSON file as a dict.
:param str json_file: The path to the JSON file or a
file-like object.
"""
try:
try:
template_json = json.load(json_file)
except AttributeError: # Not a readable source.
with open(json_file, 'r') as template:
template_json = json.load(template)
except (EnvironmentError, ValueError) as error:
raise ValueError("Invalid JSON file: {}".format(error))
else:
return template_json
def expand_template(self, template, parameters=None):
"""Expand a JSON template, substituting in optional parameters.
:param template: The template data. Can either be a dictionary,
or a path to a JSON-formatted file, or a file-like readable object.
:param parameters: The values of parameters to be substituted into
the template. Can either be a dictionary, a path to a JSON-formatted file,
or a file-like readable object.
:returns: The pool specification JSON dictionary.
"""
if not isinstance(template, dict):
template = self._load_template_file(template)
if parameters and not isinstance(parameters, dict):
parameters = self._load_template_file(parameters)
elif not parameters:
parameters = {}
expanded_job_object = templates.expand_template(template, parameters)
if 'pool' not in expanded_job_object:
raise ValueError('Missing pool element in the template.')
if 'properties' not in expanded_job_object['pool']:
raise ValueError('Missing pool properties element in the template.')
# Build up the JSON object to hand to the Batch service.
return expanded_job_object['pool']['properties']
def poolparameter_from_json(self, json_data):
"""Create an ExtendedPoolParameter object from a JSON specification.
:param dict json_data: The JSON specification of an AddPoolParameter or an
ExtendedPoolParameter.
"""
try:
pool = self._deserialize('ExtendedPoolParameter', json_data)
if pool is None:
raise ValueError("JSON data is not in correct format.")
return pool
except Exception as exp:
raise ValueError("Unable to deserialize to ExtendedPoolParameter: {}".format(exp))
def add(
self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config):
"""Adds a pool to the specified account.
When naming pools, avoid including sensitive information such as user
names or secret project names. This information may appear in telemetry
logs accessible to Microsoft Support engineers.
:param pool: The pool to be added.
:type pool: :class:`PoolAddParameter<azure.batch.models.PoolAddParameter>` or
:class:`ExtendedPoolParameter<azure.batch_extensions.models.ExtendedPoolParameter>`
:param pool_add_options: Additional parameters for the operation
:type pool_add_options: :class:`PoolAddOptions
<azure.batch.models.PoolAddOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
pool_os_flavor = None
# Handle package management
if hasattr(pool, 'package_references') and pool.package_references:
pool_os_flavor = pool_utils.get_pool_target_os_type(pool)
cmds = [templates.process_pool_package_references(pool)]
# Update the start task command
pool.start_task = models.StartTask(**templates.construct_setup_task(
pool.start_task, cmds, pool_os_flavor))
# Handle any extended resource file references.
fileutils = file_utils.FileUtils(self.get_storage_client)
templates.post_processing(pool, fileutils, pool_os_flavor)
return super(ExtendedPoolOperations, self).add(pool, pool_add_options, custom_headers, raw, **operation_config)
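For illustration, a minimal end-to-end sketch of the pool operations above; the client instance, file names, and parameter values are placeholders and not part of this change:
# Hedged usage sketch: expand a pool template, deserialize it, and add the pool.
# Assumes `client` is an authenticated azure.batch_extensions.BatchExtensionsClient;
# 'pool_template.json' and 'pool_parameters.json' are placeholder file names.
pool_params = 'pool_parameters.json'   # parameter dict, JSON file path, or file-like object
pool_spec = client.pool.expand_template('pool_template.json', pool_params)
pool = client.pool.poolparameter_from_json(pool_spec)   # -> ExtendedPoolParameter
client.pool.add(pool)   # start task, package references and resource files are post-processed here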

View file

@ -1,13 +0,0 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "0.0.1"

View file

@ -13,8 +13,8 @@ from ui_config import ConfigUI
from api import MayaAPI as maya
import azure.storage.blob as storage
import batch_extensions as batch
from batch_extensions.batch_auth import SharedKeyCredentials
import azure.batch_extensions as batch
from azure.batch.batch_auth import SharedKeyCredentials
LOG_LEVELS = {
@ -156,6 +156,8 @@ class AzureBatchConfig(object):
self.ui.threads = self._cfg.getint('AzureBatch', 'threads')
except ConfigParser.NoOptionError:
self.ui.threads = 20
finally:
self._client.threads = self.ui.threads
self.ui.set_authenticate(self._auth)
def _auto_authentication(self):
@ -190,6 +192,7 @@ class AzureBatchConfig(object):
:param int threads: The specified number of threads.
"""
self._cfg.set('AzureBatch', 'threads', threads)
self._client.threads = threads
self._save_config()
def save_changes(self):
@ -219,7 +222,7 @@ class AzureBatchConfig(object):
def get_threads(self):
"""Attempt to retrieve number of threads configured for the plugin."""
return self.ui.threads
return self._client.threads
def get_cached_vm_sku(self):
"""Attempt to retrieve a selected VM SKU from a previous session."""

View file

@ -14,7 +14,7 @@ from ui_environment import EnvironmentUI
MAYA_IMAGES = {
'Window 2016':
'Windows 2016':
{
'node_sku_id': 'batch.node.windows amd64',
'publisher': 'batch',
@ -30,7 +30,7 @@ MAYA_IMAGES = {
'sku': 'rendering',
'version': 'latest'
},
'Window 2016 (Preview)':
'Windows 2016 (Preview)':
{
'node_sku_id': 'batch.node.windows amd64',
'publisher': 'batch',
@ -152,10 +152,12 @@ class AzureBatchEnvironment(object):
return utils.OperatingSystem.linux
else:
raise ValueError('Selected pool is not using a valid Maya image.')
if utils.OperatingSystem.windows.value in self.ui.get_image():
image = self.ui.get_image()
if utils.OperatingSystem.windows.value in image:
self._log.debug("Detected windows: {}".format(image))
return utils.OperatingSystem.windows
else:
self._log.debug("Detected centos: {}".format(image))
return utils.OperatingSystem.linux
def get_environment_settings(self):

View file

@ -13,7 +13,7 @@ import json
import uuid
import traceback
from batch_extensions import models
from azure.batch_extensions import models
from api import MayaAPI as maya
from api import MayaCallbacks as callback
@ -37,7 +37,6 @@ class AzureBatchSubmission(object):
self._log = logging.getLogger('AzureBatchMaya')
self._call = call
self._tab_index = index
self._submit_threads = None
self.max_pool_size = 1000
self.ui = SubmissionUI(self, frame)
@ -204,7 +203,6 @@ class AzureBatchSubmission(object):
self.pool_manager = pools
self.env_manager = env
self.data_path = session.path
self._submit_threads = session.get_threads
if self.renderer:
self.renderer.delete()
self._configure_renderer()
@ -269,6 +267,7 @@ class AzureBatchSubmission(object):
application_params['assetScript'] = map_url
application_params['thumbScript'] = thumb_url
application_params['workspace'] = workspace_url
application_params['storageURL'] = self.asset_manager.generate_sas_token(file_group)
self._switch_tab()
self.ui.submit_status("Configuring job...")
@ -287,9 +286,8 @@ class AzureBatchSubmission(object):
progress.is_cancelled()
self.ui.submit_status("Submitting...")
progress.status("Submitting...")
threads = self._submit_threads()
self._log.debug("Submitting using {} threads.".format(threads))
self._call(self.batch.job.add, new_job, threads=threads)
self._log.debug("Submitting using {} threads.".format(self.batch.threads))
self._call(self.batch.job.add, new_job)
maya.info("Job submitted successfully")
if watch_job:

View file

@ -14,7 +14,6 @@ import threading
batch_client = None
storage_client = None
concurrent_downloads = None
header_line_length = 50
@ -39,29 +38,25 @@ def _check_valid_dir(directory):
raise RuntimeError(exp)
def _download_output(container, blob_name, output_path, size):
print("Downloading task output: {}".format(blob_name))
storage_client.get_blob_to_path(container, blob_name, output_path)
print("Output {} download successful".format(blob_name))
def _download_output(job_id, output_name, output_path, size):
print("Downloading task output: {}".format(output_name))
batch_client.file.download(output_path, job_id, remote_path=output_name)
print("Output {} download successful".format(output_name))
def _track_completed_outputs(container, dwnld_dir):
job_outputs = storage_client.list_blobs(container)
def _track_completed_outputs(job_id, dwnld_dir):
job_outputs = batch_client.file.list_from_group(job_id)
downloads = []
for output in job_outputs:
if output.name.startswith('thumbs/'):
if output['name'].startswith('thumbs/'):
continue
else:
output_file = os.path.normpath(os.path.join(dwnld_dir, output.name))
if not os.path.isfile(output_file):
if not os.path.isdir(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
downloads.append(
threading.Thread(
target=_download_output,
args=(container, output.name, output_file, output.properties.content_length)))
args=(job_id, output['name'], dwnld_dir, output['size'])))
downloads[-1].start()
if len(downloads) >= concurrent_downloads:
if len(downloads) >= batch_client.threads:
for thread in downloads:
thread.join()
downloads = []
@ -102,12 +97,12 @@ def _check_job_stopped(job):
raise RuntimeError(exp)
def track_job_progress(id, container, dwnld_dir):
def track_job_progress(job_id, dwnld_dir):
from azure.batch.models import TaskState
print("Tracking job with ID: {0}".format(id))
print("Tracking job with ID: {0}".format(job_id))
try:
job = batch_client.job.get(id)
tasks = [t for t in batch_client.task.list(id)]
job = batch_client.job.get(job_id)
tasks = [t for t in batch_client.task.list(job_id)]
while True:
completed_tasks = [t for t in tasks if t.state == TaskState.completed]
errored_tasks = [t for t in completed_tasks if t.execution_info.exit_code != 0]
@ -119,57 +114,58 @@ def track_job_progress(id, container, dwnld_dir):
if errored_tasks:
print(" - Warning: some tasks have failed.")
_track_completed_outputs(container, dwnld_dir)
_track_completed_outputs(job_id, dwnld_dir)
if _check_job_stopped(job):
return # Job complete
time.sleep(10)
job = batch_client.job.get(id)
tasks = [t for t in batch_client.task.list(id)]
job = batch_client.job.get(job_id)
tasks = [t for t in batch_client.task.list(job_id)]
except KeyboardInterrupt:
raise RuntimeError("Monitoring aborted.")
def _authenticate(cfg_path):
global batch_client, storage_client, concurrent_downloads
global batch_client, storage_client
cfg = ConfigParser.ConfigParser()
try:
cfg.read(cfg_path)
credentials = SharedKeyCredentials(
cfg.get("AzureBatch", "batch_account"),
cfg.get("AzureBatch", "batch_key"))
batch_client = batch.BatchServiceClient(
credentials, base_url=cfg.get("AzureBatch", "batch_url"))
storage_client = storage.BlockBlobService(
cfg.get("AzureBatch", "storage_account"),
cfg.get("AzureBatch", "storage_key"),
endpoint_suffix="core.windows.net")
cfg.get("AzureBatch", "storage_key"))
batch_client = batch.BatchExtensionsClient(
credentials, base_url=cfg.get("AzureBatch", "batch_url"),
storage_client=storage_client)
try:
concurrent_downloads = cfg.get("AzureBatch", "threads")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as exp:
concurrent_downloads = 20
batch_client.threads = cfg.get("AzureBatch", "threads")
except ConfigParser.NoOptionError:
batch_client.threads = 20
except (EnvironmentError, ConfigParser.NoOptionError, ConfigParser.NoSectionError) as exp:
raise ValueError("Failed to authenticate using Maya configuration {0}, Exception: {1}".format(cfg_path, exp))
raise ValueError("Failed to authenticate.\n"
"Using Maya configuration file: {}\n"
"Error: {}".format(cfg_path, exp))
if __name__ == "__main__":
try:
sys.path.append(sys.argv[5])
print("Appending path {0}".format(sys.argv[5]))
sys.path.append(sys.argv[4])
print("Appending path {0}".format(sys.argv[4]))
import azure.storage.blob as storage
import azure.batch as batch
import azure.batch_extensions as batch
from azure.batch.batch_auth import SharedKeyCredentials
data_path = sys.argv[1]
job_id = sys.argv[2]
download_dir = sys.argv[3]
container = sys.argv[4]
_check_valid_dir(download_dir)
_authenticate(data_path)
EXIT_STRING = ""
track_job_progress(job_id, container, download_dir)
track_job_progress(job_id, download_dir)
except (RuntimeError, ValueError) as exp:
EXIT_STRING = exp
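As an aside, the batched threading pattern used by _track_completed_outputs above can be summarized in a standalone sketch; the helper names below are placeholders, not the plugin's actual module:
import threading

def download_in_batches(outputs, download_one, max_threads=20):
    # Start one worker thread per output, joining whenever `max_threads` are in flight.
    # This mirrors how the monitor script throttles downloads to batch_client.threads.
    pending = []
    for output in outputs:
        worker = threading.Thread(target=download_one, args=(output,))
        worker.start()
        pending.append(worker)
        if len(pending) >= max_threads:
            for worker in pending:
                worker.join()
            pending = []
    for worker in pending:  # wait for the final partial batch
        worker.join()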

View file

@ -62,5 +62,6 @@
"poseInterpolator",
"mtoa",
"Substance",
"xgenToolkit"
"xgenToolkit",
"vrayformaya"
]

View file

@ -38,7 +38,7 @@ class EnvironmentUI(object):
with utils.ColumnLayout(
2, col_width=((1,160),(2,160)), row_spacing=(1,5),
row_offset=((1, "top", 15),(4, "bottom", 15))):
row_offset=((1, "top", 15),(5, "bottom", 15))):
maya.text(label="Use Image: ", align='right')
with utils.Dropdown(self.set_image) as image_settings:
self._image = image_settings
@ -49,7 +49,7 @@ class EnvironmentUI(object):
self._sku = sku_settings
for sku in skus:
self._sku.add_item(sku)
maya.text(label="Use license servers: ", align='right')
maya.text(label="Use licenses: ", align='right')
for label, checked in licenses.items():
self.license_settings[label] = maya.check_box(
label=label, value=checked, changeCommand=self.use_license_server)

View file

@ -10,8 +10,6 @@ import platform
import pathlib
from api import MayaAPI as maya
from batch_extensions import _file_utils as file_utils
from exception import CancellationException, FileUploadException
@ -483,7 +481,6 @@ class JobWatcher(object):
self.data_path,
self.job_id,
self.selected_dir,
file_utils._get_container_name(self.job_id),
os.path.join(maya.script_dir(), 'azure-batch-libs')] # TODO: Configure somewhere
self._log.debug("Preparing commandline arguments...")
return self.cleanup_args(args)

View file

@ -73,16 +73,21 @@
"metadata": {
"description": "Arnold logging verbosity"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"source": {
"fileGroup": "[parameters('projectData')]"
},
"filePath": "assets/"
},
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts/renderPrep.mel"
@ -96,7 +101,7 @@
"filePath": "workspace.mel"
}
],
"commandLine": "dir"
"commandLine": "/bin/bash -c \"yum install -y libunwind libicu && curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?linkid=848821 && mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet && ln -nsf /opt/dotnet/dotnet /usr/local/bin && wget -O azcopy.tar.gz https://aka.ms/downloadazcopyprlinux && tar -xf azcopy.tar.gz && ./install.sh && azcopy --source \\\"[parameters('storageURL')]\\\" --destination $AZ_BATCH_JOB_PREP_WORKING_DIR/assets --recursive\""
},
"taskFactory": {
"type": "parametricSweep",

View file

@ -73,16 +73,21 @@
"metadata": {
"description": "Arnold logging verbosity"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"source": {
"fileGroup": "[parameters('projectData')]"
},
"filePath": "assets\\"
},
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts\\renderPrep.mel"
@ -96,7 +101,7 @@
"filePath": "workspace.mel"
}
],
"commandLine": "dir"
"commandLine": "azcopy /source:\"[parameters('storageURL')]\" /dest:\"%AZ_BATCH_JOB_PREP_WORKING_DIR%\\assets\" /S"
},
"taskFactory": {
"type": "parametricSweep",

View file

@ -66,16 +66,21 @@
"metadata": {
"description": "The file group where outputs will be stored"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"source": {
"fileGroup": "[parameters('projectData')]"
},
"filePath": "assets/"
},
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts/renderPrep.mel"
@ -89,7 +94,7 @@
"filePath": "workspace.mel"
}
],
"commandLine": "dir"
"commandLine": "/bin/bash -c \"yum install -y libunwind libicu && curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?linkid=848821 && mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet && ln -nsf /opt/dotnet/dotnet /usr/local/bin && wget -O azcopy.tar.gz https://aka.ms/downloadazcopyprlinux && tar -xf azcopy.tar.gz && ./install.sh && azcopy --source \\\"[parameters('storageURL')]\\\" --destination $AZ_BATCH_JOB_PREP_WORKING_DIR/assets --recursive\""
},
"taskFactory": {
"type": "parametricSweep",

View file

@ -66,16 +66,21 @@
"metadata": {
"description": "The file group where outputs will be stored"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"source": {
"fileGroup": "[parameters('projectData')]"
},
"filePath": "assets\\"
},
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts\\renderPrep.mel"
@ -89,7 +94,7 @@
"filePath": "workspace.mel"
}
],
"commandLine": "dir"
"commandLine": "azcopy /source:\"[parameters('storageURL')]\" /dest:\"%AZ_BATCH_JOB_PREP_WORKING_DIR%\\assets\" /S"
},
"taskFactory": {
"type": "parametricSweep",

View file

@ -0,0 +1,183 @@
{
"templateMetadata": {
"description": "Application template for working with Maya and Arnold on CentOS."
},
"parameters": {
"sceneFile": {
"type": "string",
"metadata": {
"description": "The Maya scene file to be rendered"
}
},
"renderer": {
"type": "string",
"defaultValue": "vray",
"metadata": {
"description": "The Maya renderer to be used for the render"
},
"allowedValues": [
"vray"
]
},
"projectData": {
"type": "string",
"metadata": {
"description": "The file group where the input data is stored"
}
},
"assetScript": {
"type": "string",
"metadata": {
"description": "The SAS URL to a pre-render asset path redirection script"
}
},
"thumbScript": {
"type": "string",
"metadata": {
"description": "The SAS URL to the thumbnail generation script"
}
},
"frameStart": {
"type": "int",
"metadata": {
"description": "Index of the first frame to render"
}
},
"workspace": {
"type": "string",
"metadata": {
"description": "The SAS URL to the project workspace"
}
},
"frameStep": {
"type": "int",
"metadata": {
"description": "Incremental step in frame sequeunce"
}
},
"frameEnd": {
"type": "int",
"metadata": {
"description": "Index of the last frame to render"
}
},
"outputs": {
"type": "string",
"metadata": {
"description": "The file group where outputs will be stored"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts/renderPrep.mel"
},
{
"blobSource": "[parameters('thumbScript')]",
"filePath": "thumbnail.py"
},
{
"blobSource": "[parameters('workspace')]",
"filePath": "workspace.mel"
}
],
"commandLine": "/bin/bash -c \"yum install -y libunwind libicu && curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?linkid=848821 && mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet && ln -nsf /opt/dotnet/dotnet /usr/local/bin && wget -O azcopy.tar.gz https://aka.ms/downloadazcopyprlinux && tar -xf azcopy.tar.gz && ./install.sh && azcopy --source \\\"[parameters('storageURL')]\\\" --destination $AZ_BATCH_JOB_PREP_WORKING_DIR/assets --recursive\""
},
"taskFactory": {
"type": "parametricSweep",
"parameterSets": [
{
"start": "[parameters('frameStart')]",
"end": "[parameters('frameEnd')]",
"step": "[parameters('frameStep')]"
}
],
"repeatTask": {
"displayName": "Frame {0}",
"userIdentity": {
"autoUser": {
"scope": "task",
"elevationLevel": "admin"
}
},
"commandLine": "sudo mkdir -m a=rwx -p \"/X\";sudo mount --rbind $AZ_BATCH_JOB_PREP_WORKING_DIR/assets /X;Render -renderer [parameters('renderer')] -proj \"$AZ_BATCH_JOB_PREP_WORKING_DIR\" -verb -rd \"$AZ_BATCH_TASK_WORKING_DIR/images\" -s {0} -e {0} \"[parameters('sceneFile')]\";err=$?;python /mnt/resource/batch/tasks/workitems/[parameters('outputs')]/job-1/jobpreparation/wd/thumbnail.py $err;sudo umount \"/X\";exit $err",
"environmentSettings": [
{
"name": "MAYA_SCRIPT_PATH",
"value": "/mnt/resource/batch/tasks/workitems/[parameters('outputs')]/job-1/jobpreparation/wd/scripts"
},
{
"name": "FLEXLM_TIMEOUT",
"value": "5000000"
},
{
"name": "VRAY_AUTH_CLIENT_FILE_PATH",
"value": "/etc/vray"
}
],
"outputFiles": [
{
"filePattern": "images/**/*",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]"
}
},
"uploadOptions": {
"uploadCondition": "taskSuccess"
}
},
{
"filePattern": "thumbs/*.png",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "thumbs"
}
},
"uploadOptions": {
"uploadCondition": "taskSuccess"
}
},
{
"filePattern": "../stdout.txt",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "logs/frame_{0}.log"
}
},
"uploadOptions": {
"uploadCondition": "taskCompletion"
}
},
{
"filePattern": "../stderr.txt",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "logs/frame_{0}_error.log"
}
},
"uploadOptions": {
"uploadCondition": "taskCompletion"
}
}
]
}
},
"onAllTasksComplete": "terminateJob"
}
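A hedged sketch of how a template like this could be submitted through the extensions client, assuming the job operations expose expand_template and jobparameter_from_json helpers analogous to the pool operations shown earlier; the file names and the authenticated `client` are placeholders:
# Assumed API, mirroring ExtendedPoolOperations above; verify against the installed package.
job_spec = client.job.expand_template('maya-vray-centos.json', 'job_parameters.json')
job = client.job.jobparameter_from_json(job_spec)   # assumed counterpart of poolparameter_from_json
client.job.add(job)   # the prep task then azcopy-downloads the file group via parameters('storageURL')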

View file

@ -0,0 +1,173 @@
{
"templateMetadata": {
"description": "Sample application template for working with Blender."
},
"parameters": {
"sceneFile": {
"type": "string",
"metadata": {
"description": "The Maya scene file to be rendered"
}
},
"renderer": {
"type": "string",
"defaultValue": "vray",
"metadata": {
"description": "The Maya renderer to be used for the render"
},
"allowedValues": [
"vray"
]
},
"projectData": {
"type": "string",
"metadata": {
"description": "The file group where the input data is stored"
}
},
"assetScript": {
"type": "string",
"metadata": {
"description": "The SAS URL to a pre-render asset path redirection script"
}
},
"thumbScript": {
"type": "string",
"metadata": {
"description": "The SAS URL to the thumbnail generation script"
}
},
"workspace": {
"type": "string",
"metadata": {
"description": "The SAS URL to the project workspace"
}
},
"frameStart": {
"type": "int",
"metadata": {
"description": "Index of the first frame to render"
}
},
"frameStep": {
"type": "int",
"metadata": {
"description": "Incremental step in frame sequeunce"
}
},
"frameEnd": {
"type": "int",
"metadata": {
"description": "Index of the last frame to render"
}
},
"outputs": {
"type": "string",
"metadata": {
"description": "The file group where outputs will be stored"
}
},
"storageURL": {
"type": "string",
"metadata": {
"description": "SAS URL for input file group with read and list permissions"
}
}
},
"jobPreparationTask": {
"userIdentity": {
"autoUser": {
"elevationLevel": "admin"
}
},
"resourceFiles": [
{
"blobSource": "[parameters('assetScript')]",
"filePath": "scripts\\renderPrep.mel"
},
{
"blobSource": "[parameters('thumbScript')]",
"filePath": "thumbnail.py"
},
{
"blobSource": "[parameters('workspace')]",
"filePath": "workspace.mel"
}
],
"commandLine": "azcopy /source:\"[parameters('storageURL')]\" /dest:\"%AZ_BATCH_JOB_PREP_WORKING_DIR%\\assets\" /S"
},
"taskFactory": {
"type": "parametricSweep",
"parameterSets": [
{
"start": "[parameters('frameStart')]",
"end": "[parameters('frameEnd')]",
"step": "[parameters('frameStep')]"
}
],
"repeatTask": {
"displayName": "Frame {0}",
"commandLine": "subst X: %AZ_BATCH_JOB_PREP_WORKING_DIR%\\assets & render -renderer [parameters('renderer')] -proj \"%AZ_BATCH_JOB_PREP_WORKING_DIR%\" -verb -rd \"%AZ_BATCH_TASK_WORKING_DIR%\\images\" -s {0} -e {0} \"[parameters('sceneFile')]\" & call mayapy %AZ_BATCH_JOB_PREP_WORKING_DIR%\\thumbnail.py %^errorlevel%",
"environmentSettings": [
{
"name": "MAYA_SCRIPT_PATH",
"value": "%AZ_BATCH_JOB_PREP_WORKING_DIR%\\scripts"
},
{
"name": "FLEXLM_TIMEOUT",
"value": "5000000"
}
],
"outputFiles": [
{
"filePattern": "images/**/*",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]"
}
},
"uploadOptions": {
"uploadCondition": "taskSuccess"
}
},
{
"filePattern": "thumbs/*.png",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "thumbs"
}
},
"uploadOptions": {
"uploadCondition": "taskSuccess"
}
},
{
"filePattern": "../stdout.txt",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "logs/frame_{0}.log"
}
},
"uploadOptions": {
"uploadCondition": "taskCompletion"
}
},
{
"filePattern": "../stderr.txt",
"destination": {
"autoStorage": {
"fileGroup": "[parameters('outputs')]",
"path": "logs/frame_{0}_error.log"
}
},
"uploadOptions": {
"uploadCondition": "taskCompletion"
}
}
]
}
},
"onAllTasksComplete": "terminateJob"
}

View file

@ -18,7 +18,7 @@ try:
except ImportError:
import mock
import batch_extensions
from azure import batch_extensions
from ui_assets import AssetsUI
from assets import Asset, Assets, AzureBatchAssets

View file

@ -11,13 +11,13 @@ from mock import patch, Mock
from msrest import Serializer, Deserializer
from azure.storage import CloudStorageAccount
from azure.storage.blob.blockblobservice import BlockBlobService
import batch_extensions as batch
from batch_extensions.batch_auth import SharedKeyCredentials
from batch_extensions import models
from batch_extensions import operations
from batch_extensions import _template_utils as utils
from batch_extensions import _pool_utils as pool_utils
from batch_extensions import _file_utils as file_utils
import azure.batch_extensions as batch
from azure.batch.batch_auth import SharedKeyCredentials
from azure.batch_extensions import models
from azure.batch_extensions import operations
from azure.batch_extensions import _template_utils as utils
from azure.batch_extensions import _pool_utils as pool_utils
from azure.batch_extensions import _file_utils as file_utils
class TestBatchExtensions(unittest.TestCase):

View file

@ -11,9 +11,9 @@ import uuid
from environment import MAYA_IMAGES
import utils
import batch_extensions as batch
from batch_extensions import models
from batch_extensions.batch_auth import SharedKeyCredentials
import azure.batch_extensions as batch
from azure.batch_extensions import models
from azure.batch.batch_auth import SharedKeyCredentials
from azure.storage.blob.blockblobservice import BlockBlobService
STORAGE_ACCOUNT = os.environ['AZURE_STORAGE_ACCOUNT']

View file

@ -16,7 +16,7 @@ except ImportError:
import os
import sys
from collections import namedtuple
from batch_extensions import models
from azure.batch_extensions import models
CWD = os.path.dirname(os.path.abspath(__file__))
top_dir = os.path.dirname(CWD)

View file

@ -21,8 +21,8 @@ except ImportError:
from ui_pools import PoolsUI, AzureBatchPoolInfo
from pools import AzureBatchPools
from environment import AzureBatchEnvironment
import batch_extensions as batch
from batch_extensions import models
import azure.batch_extensions as batch
from azure.batch_extensions import models
class AzureTestBatchPools(unittest.TestCase):

View file

@ -27,10 +27,10 @@ from shared import AzureBatchSettings
from exception import CancellationException
from utils import OperatingSystem
from batch_extensions import BatchExtensionsClient
from batch_extensions.batch_auth import SharedKeyCredentials
from batch_extensions import operations
from batch_extensions import models
from azure.batch_extensions import BatchExtensionsClient
from azure.batch.batch_auth import SharedKeyCredentials
from azure.batch_extensions import operations
from azure.batch_extensions import models
from azure.storage.blob import BlockBlobService