Update Batch Extension for REST API 2018-08-01 (#84)

* Update models to use only kw args

* Update the model with autorest 3 style

* Update all extension models calls and test_extensions.py

* Fix test break

* Add test recordings

* Manually edit recordings

* Update requirements.txt
Brandon Klein 2018-09-21 11:21:26 -07:00 committed by Xing Wu
Parent 29197a346f
Commit dd6873c641
78 changed files with 5060 additions and 2332 deletions

View file

@@ -3,6 +3,11 @@
SDK Release History
===================
4.0.0 (2018-08-29)
------------------
* **Breaking** Model signatures now use keyword-argument syntax only. Each positional argument must be rewritten as a keyword argument.
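A minimal before/after sketch of what this migration means for callers, using ParameterSet from this package:

    from azext.batch.models import ParameterSet

    # 3.x and earlier accepted positional arguments:
    # params = ParameterSet(1, 10, 2)  # now raises TypeError

    # 4.0.0 requires a keyword for every argument:
    params = ParameterSet(start=1, end=10, step=2)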
3.1.2 (2018-08-22)
------------------

View file

@@ -843,7 +843,7 @@ def _transform_repeat_task(task, context, index, transformer):
"""
if not task or not task.command_line:
raise ValueError("RepeatTask and it's command line must be defined.")
new_task = models.ExtendedTaskParameter(str(index), **copy.deepcopy(task.__dict__))
new_task = models.ExtendedTaskParameter(id=str(index), **copy.deepcopy(task.__dict__))
_replacement_transform(transformer, new_task, 'command_line', context)
_replacement_transform(transformer, new_task, 'display_name', context)
try:
@@ -893,7 +893,7 @@ def _parse_parameter_sets(parameter_sets):
raise ValueError("At least one parameter set is required.")
iterations = []
for params in parameter_sets:
valid_params = models.ParameterSet(params.start, params.end, params.step)
valid_params = models.ParameterSet(start=params.start, end=params.end, step=params.step)
end = valid_params.end + 1 if valid_params.end >= valid_params.start else valid_params.end - 1
iterations.append(range(valid_params.start, end, valid_params.step))
return itertools.product(*iterations)
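For illustration, a standalone sketch of the sweep expansion above: each ParameterSet becomes an inclusive range, and itertools.product enumerates every combination handed to the repeat task.

    import itertools

    # ParameterSet(start=1, end=3, step=1) and ParameterSet(start=0, end=10, step=5)
    # expand to inclusive ranges (note the end + 1 adjustment above):
    iterations = [range(1, 3 + 1, 1), range(0, 10 + 1, 5)]
    for combo in itertools.product(*iterations):
        print(combo)  # (1, 0), (1, 5), (1, 10), (2, 0), ... (3, 10)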

View file

@@ -10,36 +10,102 @@
from azure.batch.models import *
from azure.batch.models.batch_service_client_enums import *
from .extended_task_parameter import ExtendedTaskParameter
from .extended_job_parameter import ExtendedJobParameter
from .extended_pool_parameter import ExtendedPoolParameter
from .extended_pool_specification import ExtendedPoolSpecification
from .auto_pool_specification import AutoPoolSpecification
from .output_file import OutputFile
from .extended_output_file_destination import ExtendedOutputFileDestination
from .output_file_auto_storage_destination import OutputFileAutoStorageDestination
from .extended_resource_file import ExtendedResourceFile
from .multi_instance_settings import MultiInstanceSettings
from .file_source import FileSource
from .task_factory_base import TaskFactoryBase
from .task_collection_task_factory import TaskCollectionTaskFactory
from .parametric_sweep_task_factory import ParametricSweepTaskFactory
from .file_collection_task_factory import FileCollectionTaskFactory
from .parameter_set import ParameterSet
from .repeat_task import RepeatTask
from .package_reference_base import PackageReferenceBase
from .chocolatey_package_reference import ChocolateyPackageReference
from .yum_package_reference import YumPackageReference
from .apt_package_reference import AptPackageReference
from .application_template_info import ApplicationTemplateInfo
from .merge_task import MergeTask
from .job_preparation_task import JobPreparationTask
from .job_release_task import JobReleaseTask
from .job_manager_task import JobManagerTask
from .start_task import StartTask
from .application_template import ApplicationTemplate
from .job_template import JobTemplate
from .pool_template import PoolTemplate
try:
from .extended_task_parameter_py3 import ExtendedTaskParameter
from .extended_job_parameter_py3 import ExtendedJobParameter
from .extended_pool_parameter_py3 import ExtendedPoolParameter
from .extended_pool_specification_py3 import ExtendedPoolSpecification
from .auto_pool_specification_py3 import AutoPoolSpecification
from .output_file_py3 import OutputFile
from .extended_output_file_destination_py3 import ExtendedOutputFileDestination
from .output_file_auto_storage_destination_py3 import OutputFileAutoStorageDestination
from .extended_resource_file_py3 import ExtendedResourceFile
from .multi_instance_settings_py3 import MultiInstanceSettings
from .file_source_py3 import FileSource
from .task_factory_base_py3 import TaskFactoryBase
from .task_collection_task_factory_py3 import TaskCollectionTaskFactory
from .parametric_sweep_task_factory_py3 import ParametricSweepTaskFactory
from .file_collection_task_factory_py3 import FileCollectionTaskFactory
from .parameter_set_py3 import ParameterSet
from .repeat_task_py3 import RepeatTask
from .package_reference_base_py3 import PackageReferenceBase
from .chocolatey_package_reference_py3 import ChocolateyPackageReference
from .yum_package_reference_py3 import YumPackageReference
from .apt_package_reference_py3 import AptPackageReference
from .application_template_info_py3 import ApplicationTemplateInfo
from .merge_task_py3 import MergeTask
from .job_preparation_task_py3 import JobPreparationTask
from .job_release_task_py3 import JobReleaseTask
from .job_manager_task_py3 import JobManagerTask
from .start_task_py3 import StartTask
from .application_template_py3 import ApplicationTemplate
from .job_template_py3 import JobTemplate
from .pool_template_py3 import PoolTemplate
except (SyntaxError, ImportError):
from .extended_task_parameter import ExtendedTaskParameter
from .extended_job_parameter import ExtendedJobParameter
from .extended_pool_parameter import ExtendedPoolParameter
from .extended_pool_specification import ExtendedPoolSpecification
from .auto_pool_specification import AutoPoolSpecification
from .output_file import OutputFile
from .extended_output_file_destination import ExtendedOutputFileDestination
from .output_file_auto_storage_destination import OutputFileAutoStorageDestination
from .extended_resource_file import ExtendedResourceFile
from .multi_instance_settings import MultiInstanceSettings
from .file_source import FileSource
from .task_factory_base import TaskFactoryBase
from .task_collection_task_factory import TaskCollectionTaskFactory
from .parametric_sweep_task_factory import ParametricSweepTaskFactory
from .file_collection_task_factory import FileCollectionTaskFactory
from .parameter_set import ParameterSet
from .repeat_task import RepeatTask
from .package_reference_base import PackageReferenceBase
from .chocolatey_package_reference import ChocolateyPackageReference
from .yum_package_reference import YumPackageReference
from .apt_package_reference import AptPackageReference
from .application_template_info import ApplicationTemplateInfo
from .merge_task import MergeTask
from .job_preparation_task import JobPreparationTask
from .job_release_task import JobReleaseTask
from .job_manager_task import JobManagerTask
from .start_task import StartTask
from .application_template import ApplicationTemplate
from .job_template import JobTemplate
from .pool_template import PoolTemplate
from .constants import (
PROPS_RESERVED_FOR_JOBS,
PROPS_PERMITTED_ON_TEMPLATES)
__all__ = [
'ExtendedTaskParameter',
'ExtendedJobParameter',
'ExtendedPoolParameter',
'ExtendedPoolSpecification',
'AutoPoolSpecification',
'OutputFile',
'ExtendedOutputFileDestination',
'OutputFileAutoStorageDestination',
'ExtendedResourceFile',
'MultiInstanceSettings',
'FileSource',
'TaskFactoryBase',
'TaskCollectionTaskFactory',
'ParametricSweepTaskFactory',
'FileCollectionTaskFactory',
'ParameterSet',
'RepeatTask',
'PackageReferenceBase',
'ChocolateyPackageReference',
'YumPackageReference',
'AptPackageReference',
'ApplicationTemplateInfo',
'MergeTask',
'JobPreparationTask',
'JobReleaseTask',
'JobManagerTask',
'StartTask',
'ApplicationTemplate',
'JobTemplate',
'PoolTemplate',
]
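The try/except above follows the standard autorest convention: the *_py3 modules use Python 3-only syntax (keyword-only arguments and annotations), so importing them under Python 2 raises SyntaxError and the interpreter falls back to the **kwargs-based modules. A minimal sketch of the idiom with a hypothetical model:

    try:
        # Python 3: keyword-only signature with type annotations.
        from .my_model_py3 import MyModel  # hypothetical module name
    except (SyntaxError, ImportError):
        # Python 2: the same model implemented with **kwargs only.
        from .my_model import MyModel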

View file

@@ -96,15 +96,14 @@ class ApplicationTemplate(Model):
'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'},
}
def __init__(self, job_manager_task=None, job_preparation_task=None, job_release_task=None,
common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None,
metadata=None, uses_task_dependencies=None, task_factory=None):
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.metadata = metadata
self.uses_task_dependencies = uses_task_dependencies
self.task_factory = task_factory
def __init__(self, **kwargs):
super(ApplicationTemplate, self).__init__(**kwargs)
self.job_manager_task = kwargs.get('job_manager_task', None)
self.job_preparation_task = kwargs.get('job_preparation_task', None)
self.job_release_task = kwargs.get('job_release_task', None)
self.common_environment_settings = kwargs.get('common_environment_settings', None)
self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None)
self.on_task_failure = kwargs.get('on_task_failure', None)
self.metadata = kwargs.get('metadata', None)
self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None)
self.task_factory = kwargs.get('task_factory', None)

View file

@@ -27,11 +27,13 @@ class ApplicationTemplateInfo(Model):
'parameters': {'key': 'parameters', 'type': 'object'},
}
def __init__(self, file_path, parameters=None, current_directory="."):
self.file_path = file_path
if not os.path.isfile(file_path):
self.file_path = os.path.abspath(os.path.join(current_directory, str(file_path)))
self.parameters = parameters
def __init__(self, **kwargs):
super(ApplicationTemplateInfo, self).__init__(**kwargs)
self.file_path = kwargs.get('file_path', None)
if not os.path.isfile(self.file_path):
current_directory = kwargs.get('current_directory', ".")
self.file_path = os.path.abspath(os.path.join(current_directory, str(self.file_path)))
self.parameters = kwargs.get('parameters', None)
# Rule: Template file must exist
# (We do this in order to give a good diagnostic in the most common case, knowing that this is

View file

@@ -0,0 +1,46 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from msrest.serialization import Model
class ApplicationTemplateInfo(Model):
"""A reference to an Azure Batch Application Template.
:param str file_path: The path to an application template file. This can
be a full path, or relative to the current working directory. Alternatively
a relative directory can be supplied with the 'current_directory' argument.
A ValueError will be raised if the supplied file path cannot be found.
:param dict parameters: A dictionary of parameter names and values to be
substituted into the application template.
"""
_validation = {
'file_path': {'required': True},
}
_attribute_map = {
'file_path': {'key': 'filePath', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'object'},
}
def __init__(self, *, file_path: str, parameters: object=None, current_directory: str=".", **kwargs) -> None:
super(ApplicationTemplateInfo, self).__init__(**kwargs)
self.file_path = file_path
if not os.path.isfile(self.file_path):
self.file_path = os.path.abspath(os.path.join(current_directory, str(self.file_path)))
self.parameters = parameters
# Rule: Template file must exist
# (We do this in order to give a good diagnostic in the most common case, knowing that this is
# technically a race condition because someone could delete the file between our check here and
# reading the file later on. We expect such cases to be rare.)
try:
with open(self.file_path, 'r'):
pass
except EnvironmentError as error:
raise ValueError("Unable to read the template '{}': {}".format(self.file_path, error))

View file

@@ -0,0 +1,111 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationTemplate(Model):
"""An Azure Batch Application Template.
:param job_manager_task: Details of a Job Manager task to be launched when
the job is started. If the job does not specify a Job Manager task, the
user must explicitly add tasks to the job. If the job does specify a Job
Manager task, the Batch service creates the Job Manager task when the job
is created, and will try to schedule the Job Manager task before
scheduling other tasks in the job. The Job Manager task's typical purpose
is to control and/or monitor job execution, for example by deciding what
additional tasks to run, determining when the work is complete, etc.
(However, a Job Manager task is not restricted to these activities - it is
a fully-fledged task in the system and can perform whatever actions are
required for the job.) For example, a Job Manager task might download a
file specified as a parameter, analyze the contents of that file and
submit additional tasks based on those contents.
:type job_manager_task: :class:`JobManagerTask
<azure.batch.models.JobManagerTask>`
:param job_preparation_task: The Job Preparation task. If a job has a Job
Preparation task, the Batch service will run the Job Preparation task on a
compute node before starting any tasks of that job on that compute node.
:type job_preparation_task: :class:`JobPreparationTask
<azure.batch.models.JobPreparationTask>`
:param job_release_task: The Job Release task. A Job Release task cannot
be specified without also specifying a Job Preparation task for the job.
The Batch service runs the Job Release task on the compute nodes that have
run the Job Preparation task. The primary purpose of the Job Release task
is to undo changes to compute nodes made by the Job Preparation task.
Example activities include deleting local files, or shutting down services
that were started as part of job preparation.
:type job_release_task: :class:`JobReleaseTask
<azure.batch.models.JobReleaseTask>`
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all tasks in
the job (including the Job Manager, Job Preparation and Job Release
tasks). Individual tasks can override an environment setting specified
here by specifying the same setting name with a different value.
:type common_environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. Note that if a job
contains no tasks, then all tasks are considered complete. This option is
therefore most commonly used with a Job Manager task; if you want to use
automatic job termination without a Job Manager, you should initially set
onAllTasksComplete to noAction and update the job properties to set
onAllTasksComplete to terminateJob once you have finished adding tasks.
Permitted values are: noAction - do nothing. The job remains active unless
terminated or disabled by some other means. terminateJob - terminate the
job. The job's terminateReason is set to 'AllTasksComplete'. The default
is noAction. Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or :class:`OnAllTasksComplete
<azure.batch.models.OnAllTasksComplete>`
:param on_task_failure: The action the Batch service should take when any
task in the job fails. A task is considered to have failed if it has a
failureInfo. A failureInfo is set if the task completes with a non-zero
exit code after exhausting its retry count, or if there was an error
starting the task, for example due to a resource file download error.
noAction - do nothing. performExitOptionsJobAction - take the action
associated with the task exit condition in the task's exitConditions
collection. (This may still result in no action being taken, if that is
what the task specifies.) The default is noAction. Possible values
include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or :class:`OnTaskFailure
<azure.batch.models.OnTaskFailure>`
:param metadata: A list of name-value pairs associated with the job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param uses_task_dependencies: Whether tasks in the job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param task_factory: A task factory reference to automatically generate a set of
tasks to be added to the job.
:type task_factory: :class:`TaskFactoryBase
<azext.batch.models.TaskFactoryBase>`
"""
_attribute_map = {
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'},
}
def __init__(self, *, job_manager_task=None, job_preparation_task=None, job_release_task=None,
common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None,
metadata=None, uses_task_dependencies: bool=None, task_factory=None, **kwargs) -> None:
super(ApplicationTemplate, self).__init__(**kwargs)
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.metadata = metadata
self.uses_task_dependencies = uses_task_dependencies
self.task_factory = task_factory

View file

@@ -28,6 +28,6 @@ class AptPackageReference(PackageReferenceBase):
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, id, version=None):
super(AptPackageReference, self).__init__(id=id, version=version)
def __init__(self, **kwargs):
super(AptPackageReference, self).__init__(**kwargs)
self.type = 'aptPackage'

View file

@@ -0,0 +1,33 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from .package_reference_base import PackageReferenceBase
class AptPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the APT package
manager on a Linux node (apt-get).
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, *, id: str, version: str=None, **kwargs) -> None:
super(AptPackageReference, self).__init__(id=id, version=version, **kwargs)
self.type = 'aptPackage'
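A usage sketch, assuming the keyword-only signature above; the type discriminator is set by the class itself:

    from azext.batch.models import AptPackageReference

    latest = AptPackageReference(id='ffmpeg')                 # latest available version
    pinned = AptPackageReference(id='zip', version='3.0-11')  # pinned version
    print(latest.type)  # 'aptPackage'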

View file

@@ -51,9 +51,9 @@ class AutoPoolSpecification(Model):
'pool': {'key': 'pool', 'type': 'ExtendedPoolSpecification'},
}
def __init__(self, pool_lifetime_option, auto_pool_id_prefix=None, keep_alive=None, pool=None):
super(AutoPoolSpecification, self).__init__()
self.auto_pool_id_prefix = auto_pool_id_prefix
self.pool_lifetime_option = pool_lifetime_option
self.keep_alive = keep_alive
self.pool = pool
def __init__(self, **kwargs):
super(AutoPoolSpecification, self).__init__(**kwargs)
self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None)
self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None)
self.keep_alive = kwargs.get('keep_alive', None)
self.pool = kwargs.get('pool', None)

View file

@@ -0,0 +1,60 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AutoPoolSpecification(Model):
"""Specifies characteristics for a temporary 'auto pool'. The Batch service
will create this auto pool when the job is submitted.
:param auto_pool_id_prefix: A prefix to be added to the unique identifier
when a pool is automatically created. The Batch service assigns each auto
pool a unique identifier on creation. To distinguish between pools created
for different purposes, you can specify this element to add a prefix to
the ID that is assigned. The prefix can be up to 20 characters long.
:type auto_pool_id_prefix: str
:param pool_lifetime_option: The minimum lifetime of created auto pools,
and how multiple jobs on a schedule are assigned to pools. When the pool
lifetime is jobSchedule the pool exists for the lifetime of the job
schedule. The Batch Service creates the pool when it creates the first job
on the schedule. You may apply this option only to job schedules, not to
jobs. When the pool lifetime is job the pool exists for the lifetime of
the job to which it is dedicated. The Batch service creates the pool when
it creates the job. If the 'job' option is applied to a job schedule, the
Batch service creates a new auto pool for every job created on the
schedule. Possible values include: 'jobSchedule', 'job'
:type pool_lifetime_option: str or :class:`PoolLifetimeOption
<azure.batch.models.PoolLifetimeOption>`
:param keep_alive: Whether to keep an auto pool alive after its lifetime
expires. If false, the Batch service deletes the pool once its lifetime
(as determined by the poolLifetimeOption setting) expires; that is, when
the job or job schedule completes. If true, the Batch service does not
delete the pool automatically. It is up to the user to delete auto pools
created with this option.
:type keep_alive: bool
:param pool: The pool specification for the auto pool.
:type pool: :class:`PoolSpecification
<azure.batch.models.PoolSpecification>`
"""
_validation = {
'pool_lifetime_option': {'required': True},
}
_attribute_map = {
'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'},
'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'},
'keep_alive': {'key': 'keepAlive', 'type': 'bool'},
'pool': {'key': 'pool', 'type': 'ExtendedPoolSpecification'},
}
def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None,
keep_alive: bool=None, pool=None, **kwargs) -> None:
super(AutoPoolSpecification, self).__init__(**kwargs)
self.auto_pool_id_prefix = auto_pool_id_prefix
self.pool_lifetime_option = pool_lifetime_option
self.keep_alive = keep_alive
self.pool = pool
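A minimal sketch of constructing the model above; the values are illustrative, and a real pool specification needs more configuration before submission:

    from azext.batch.models import AutoPoolSpecification, ExtendedPoolSpecification

    spec = AutoPoolSpecification(
        pool_lifetime_option='job',    # the pool lives only as long as the job
        auto_pool_id_prefix='sweep',   # at most 20 characters
        keep_alive=False,
        pool=ExtendedPoolSpecification(vm_size='STANDARD_D1_V2'))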

View file

@@ -31,7 +31,7 @@ class ChocolateyPackageReference(PackageReferenceBase):
'allow_empty_checksums': {'key': 'allowEmptyChecksums', 'type': 'bool'}
}
def __init__(self, id, version=None, allow_empty_checksums=None):
super(ChocolateyPackageReference, self).__init__(id=id, version=version)
self.allow_empty_checksums = allow_empty_checksums
def __init__(self, **kwargs):
super(ChocolateyPackageReference, self).__init__(**kwargs)
self.allow_empty_checksums = kwargs.get('allow_empty_checksums', None)
self.type = 'chocolateyPackage'

View file

@@ -0,0 +1,37 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from .package_reference_base import PackageReferenceBase
class ChocolateyPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the Chocolatey package
manager on a Windows node.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
:param bool allow_empty_checksums: Whether Chocolatey will install packages
without a checksum for validation. Default is false.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'allow_empty_checksums': {'key': 'allowEmptyChecksums', 'type': 'bool'}
}
def __init__(self, *, id: str, version: str=None, allow_empty_checksums: bool=None, **kwargs) -> None:
super(ChocolateyPackageReference, self).__init__(id=id, version=version, **kwargs)
self.allow_empty_checksums = allow_empty_checksums
self.type = 'chocolateyPackage'
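And the Windows counterpart, again assuming the keyword-only signature above:

    from azext.batch.models import ChocolateyPackageReference

    ref = ChocolateyPackageReference(
        id='ffmpeg',
        version='4.0.2',
        allow_empty_checksums=True)  # opt in to packages without checksums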

View file

@@ -138,26 +138,10 @@ class ExtendedJobParameter(JobAddParameter):
'application_template_info': {'key': 'applicationTemplateInfo', 'type': 'ApplicationTemplateInfo'}
}
def __init__(self, id, pool_info, display_name=None, priority=None, constraints=None, job_manager_task=None,
job_preparation_task=None, job_release_task=None, common_environment_settings=None,
on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies=None,
task_factory=None, application_template_info=None):
super(ExtendedJobParameter, self).__init__(
id=id,
display_name=display_name,
priority=priority,
constraints=constraints,
job_manager_task=job_manager_task,
job_preparation_task=job_preparation_task,
job_release_task=job_release_task,
common_environment_settings=common_environment_settings,
pool_info=pool_info,
on_all_tasks_complete=on_all_tasks_complete,
on_task_failure=on_task_failure,
metadata=metadata,
uses_task_dependencies=uses_task_dependencies)
self.task_factory = task_factory
self.application_template_info = application_template_info
def __init__(self, **kwargs):
super(ExtendedJobParameter, self).__init__(**kwargs)
self.task_factory = kwargs.get('task_factory', None)
self.application_template_info = kwargs.get('application_template_info', None)
if self.application_template_info:
# Rule: Jobs may not use properties reserved for template use
reserved = [k for k, v in self.__dict__.items() \

View file

@@ -0,0 +1,170 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from azure.batch.models import JobAddParameter
from .constants import ATTRS_RESERVED_FOR_TEMPLATES
class ExtendedJobParameter(JobAddParameter):
"""An Azure Batch job to add.
:param id: A string that uniquely identifies the job within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the job. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param priority: The priority of the job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. The default value is 0.
:type priority: int
:param constraints: The execution constraints for the job.
:type constraints: :class:`JobConstraints
<azure.batch.models.JobConstraints>`
:param job_manager_task: Details of a Job Manager task to be launched when
the job is started. If the job does not specify a Job Manager task, the
user must explicitly add tasks to the job. If the job does specify a Job
Manager task, the Batch service creates the Job Manager task when the job
is created, and will try to schedule the Job Manager task before
scheduling other tasks in the job. The Job Manager task's typical purpose
is to control and/or monitor job execution, for example by deciding what
additional tasks to run, determining when the work is complete, etc.
(However, a Job Manager task is not restricted to these activities - it is
a fully-fledged task in the system and can perform whatever actions are
required for the job.) For example, a Job Manager task might download a
file specified as a parameter, analyze the contents of that file and
submit additional tasks based on those contents.
:type job_manager_task: :class:`JobManagerTask
<azure.batch.models.JobManagerTask>`
:param job_preparation_task: The Job Preparation task. If a job has a Job
Preparation task, the Batch service will run the Job Preparation task on a
compute node before starting any tasks of that job on that compute node.
:type job_preparation_task: :class:`JobPreparationTask
<azure.batch.models.JobPreparationTask>`
:param job_release_task: The Job Release task. A Job Release task cannot
be specified without also specifying a Job Preparation task for the job.
The Batch service runs the Job Release task on the compute nodes that have
run the Job Preparation task. The primary purpose of the Job Release task
is to undo changes to compute nodes made by the Job Preparation task.
Example activities include deleting local files, or shutting down services
that were started as part of job preparation.
:type job_release_task: :class:`JobReleaseTask
<azure.batch.models.JobReleaseTask>`
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all tasks in
the job (including the Job Manager, Job Preparation and Job Release
tasks). Individual tasks can override an environment setting specified
here by specifying the same setting name with a different value.
:type common_environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param pool_info: The pool on which the Batch service runs the job's
tasks.
:type pool_info: :class:`PoolInformation
<azure.batch.models.PoolInformation>`
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. Note that if a job
contains no tasks, then all tasks are considered complete. This option is
therefore most commonly used with a Job Manager task; if you want to use
automatic job termination without a Job Manager, you should initially set
onAllTasksComplete to noAction and update the job properties to set
onAllTasksComplete to terminateJob once you have finished adding tasks.
Permitted values are: noAction - do nothing. The job remains active unless
terminated or disabled by some other means. terminateJob - terminate the
job. The job's terminateReason is set to 'AllTasksComplete'. The default
is noAction. Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or :class:`OnAllTasksComplete
<azure.batch.models.OnAllTasksComplete>`
:param on_task_failure: The action the Batch service should take when any
task in the job fails. A task is considered to have failed if it has a
failureInfo. A failureInfo is set if the task completes with a non-zero
exit code after exhausting its retry count, or if there was an error
starting the task, for example due to a resource file download error.
noAction - do nothing. performExitOptionsJobAction - take the action
associated with the task exit condition in the task's exitConditions
collection. (This may still result in no action being taken, if that is
what the task specifies.) The default is noAction. Possible values
include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or :class:`OnTaskFailure
<azure.batch.models.OnTaskFailure>`
:param metadata: A list of name-value pairs associated with the job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param uses_task_dependencies: Whether tasks in the job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param task_factory: A task factory reference to automatically generate a set of
tasks to be added to the job.
:type task_factory: :class:`TaskFactoryBase
<azext.batch.models.TaskFactoryBase>`
:param application_template_info: A reference to an application template file to
be expanded to complete the job specification. If supplied, the following arguments
cannot also be supplied or they will be overwritten: 'job_manager_task',
'common_environment_settings', 'uses_task_dependencies', 'on_all_tasks_complete',
'on_task_failure', 'task_factory', 'job_preparation_task', 'job_release_task'.
:type application_template_info: :class:`ApplicationTemplateInfo
<azext.batch.models.ApplicationTemplateInfo>`
"""
_validation = {
'id': {'required': True},
'pool_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'},
'application_template_info': {'key': 'applicationTemplateInfo', 'type': 'ApplicationTemplateInfo'}
}
def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None,
job_manager_task=None, job_preparation_task=None, job_release_task=None,
common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None,
metadata=None, uses_task_dependencies: bool=None, task_factory=None,
application_template_info=None, **kwargs) -> None:
super(ExtendedJobParameter, self).__init__(
id=id,
display_name=display_name,
priority=priority,
constraints=constraints,
job_manager_task=job_manager_task,
job_preparation_task=job_preparation_task,
job_release_task=job_release_task,
common_environment_settings=common_environment_settings,
pool_info=pool_info,
on_all_tasks_complete=on_all_tasks_complete,
on_task_failure=on_task_failure,
metadata=metadata,
uses_task_dependencies=uses_task_dependencies,
**kwargs)
self.task_factory = task_factory
self.application_template_info = application_template_info
if self.application_template_info:
# Rule: Jobs may not use properties reserved for template use
reserved = [k for k, v in self.__dict__.items() \
if k in ATTRS_RESERVED_FOR_TEMPLATES and v is not None]
if reserved:
raise ValueError("Jobs using application templates may not use these "
"properties: {}".format(', '.join(reserved)))

View file

@@ -25,8 +25,9 @@ class ExtendedOutputFileDestination(Model):
'auto_storage': {'key': 'autoStorage', 'type': 'OutputFileAutoStorageDestination'},
}
def __init__(self, container=None, auto_storage=None):
if container and auto_storage:
def __init__(self, **kwargs):
super(ExtendedOutputFileDestination, self).__init__(**kwargs)
self.container = kwargs.get('container', None)
self.auto_storage = kwargs.get('auto_storage', None)
if self.container and self.auto_storage:
raise ValueError("Cannot specify both container and auto_storage.")
self.container = container
self.auto_storage = auto_storage

View file

@@ -0,0 +1,33 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ExtendedOutputFileDestination(Model):
"""The specification for where output files should be uploaded to on task
completion.
:param container: A location in Azure blob storage to which files are
uploaded. This cannot be combined with auto_storage.
:type container: :class:`OutputFileBlobContainerDestination
<azure.batch.models.OutputFileBlobContainerDestination>`
:param auto_storage: An auto-storage file group reference. This cannot be
combined with container.
:type auto_storage: :class:`OutputFileAutoStorageDestination
<azext.batch.models.OutputFileAutoStorageDestination>`
"""
_attribute_map = {
'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'},
'auto_storage': {'key': 'autoStorage', 'type': 'OutputFileAutoStorageDestination'},
}
def __init__(self, *, container=None, auto_storage=None, **kwargs) -> None:
super(ExtendedOutputFileDestination, self).__init__(**kwargs)
if container and auto_storage:
raise ValueError("Cannot specify both container and auto_storage.")
self.container = container
self.auto_storage = auto_storage
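A sketch of the mutual-exclusion check above; the file_group argument to OutputFileAutoStorageDestination is an assumption based on this extension's file-group conventions:

    import azext.batch.models as models

    destination = models.ExtendedOutputFileDestination(
        auto_storage=models.OutputFileAutoStorageDestination(file_group='outputs'))

    # Supplying both targets is rejected up front:
    # models.ExtendedOutputFileDestination(container=..., auto_storage=...)
    # -> ValueError: Cannot specify both container and auto_storage.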

View file

@@ -180,33 +180,6 @@ class ExtendedPoolParameter(PoolAddParameter):
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, id, vm_size, display_name=None, cloud_service_configuration=None,
virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes=None,
target_low_priority_nodes=None, enable_auto_scale=None, auto_scale_formula=None,
auto_scale_evaluation_interval=None, enable_inter_node_communication=None,
network_configuration=None, start_task=None, certificate_references=None,
application_package_references=None, application_licenses=None, max_tasks_per_node=None,
task_scheduling_policy=None, user_accounts=None, metadata=None, package_references=None):
super(ExtendedPoolParameter, self).__init__(
id=id,
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
user_accounts=user_accounts,
metadata=metadata)
self.package_references = package_references
def __init__(self, **kwargs):
super(ExtendedPoolParameter, self).__init__(**kwargs)
self.package_references = kwargs.get('package_references', None)

View file

@@ -0,0 +1,214 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from azure.batch.models import PoolAddParameter
class ExtendedPoolParameter(PoolAddParameter):
"""A pool in the Azure Batch service to add.
:param id: A string that uniquely identifies the pool within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
pool IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service returns an error; if you are calling the REST API
directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, at least one of targetDedicatedNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information. For more information about specifying this formula, see
'Automatically scale compute nodes in an Azure Batch pool'
(https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: :class:`NetworkConfiguration
<azure.batch.models.NetworkConfiguration>`
:param start_task: A task specified to run on each compute node as it
joins the pool. The task runs when the node is added to the pool or when
the node is restarted.
:type start_task: :class:`StartTask <azure.batch.models.StartTask>`
:param certificate_references: The list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references: list of :class:`CertificateReference
<azure.batch.models.CertificateReference>`
:param application_package_references: The list of application packages to
be installed on each compute node in the pool.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list of str
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How tasks are distributed across compute
nodes in a pool.
:type task_scheduling_policy: :class:`TaskSchedulingPolicy
<azure.batch.models.TaskSchedulingPolicy>`
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list of :class:`UserAccount
<azure.batch.models.UserAccount>`
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azext.batch.models.PackageReferenceBase>`
"""
_validation = {
'id': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration',
'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration',
'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None,
virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None,
target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None,
auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None,
network_configuration=None, start_task=None, certificate_references=None,
application_package_references=None, application_licenses=None, max_tasks_per_node: int=None,
task_scheduling_policy=None, user_accounts=None, metadata=None, package_references=None,
**kwargs) -> None:
super(ExtendedPoolParameter, self).__init__(
id=id,
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
user_accounts=user_accounts,
metadata=metadata,
**kwargs)
self.package_references = package_references
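A minimal sketch of the extension point above (a real pool also needs a virtual_machine_configuration or cloud_service_configuration before the service will accept it):

    import azext.batch.models as models

    pool = models.ExtendedPoolParameter(
        id='linux-pool',
        vm_size='STANDARD_D1_V2',
        target_dedicated_nodes=2,
        package_references=[
            models.AptPackageReference(id='ffmpeg'),
            models.AptPackageReference(id='zip', version='3.0-11')])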

View file

@@ -174,32 +174,6 @@ class ExtendedPoolSpecification(PoolSpecification):
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, vm_size, display_name=None, cloud_service_configuration=None,
virtual_machine_configuration=None, max_tasks_per_node=None, task_scheduling_policy=None,
resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None,
enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None,
enable_inter_node_communication=None, network_configuration=None, start_task=None,
certificate_references=None, application_package_references=None, application_licenses=None,
user_accounts=None, metadata=None, package_references=None):
super(ExtendedPoolSpecification, self).__init__(
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
user_accounts=user_accounts,
metadata=metadata)
self.package_references = package_references
def __init__(self, **kwargs):
super(ExtendedPoolSpecification, self).__init__(**kwargs)
self.package_references = kwargs.get('package_references', None)

View file

@@ -0,0 +1,206 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.batch.models import PoolSpecification
class ExtendedPoolSpecification(PoolSpecification):
"""Specification for creating a new pool.
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of the virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property must be specified if the pool needs to be created
with Azure PaaS VMs. This property and virtualMachineConfiguration are
mutually exclusive and one of the properties must be specified. If neither
is specified then the Batch service returns an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request). This
property cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property must be specified if the pool needs to be
created with Azure IaaS VMs. This property and cloudServiceConfiguration
are mutually exclusive and one of the properties must be specified. If
neither is specified then the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How tasks are distributed across compute
nodes in a pool.
:type task_scheduling_policy: :class:`TaskSchedulingPolicy
<azure.batch.models.TaskSchedulingPolicy>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service rejects the request with an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, the targetDedicated element is required. If
true, the autoScaleFormula element is required. The pool automatically
resizes according to the formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: The formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum values are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service rejects the request with an
invalid property value error; if you are calling the REST API directly,
the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: :class:`NetworkConfiguration
<azure.batch.models.NetworkConfiguration>`
:param start_task: A task to run on each compute node as it joins the
pool. The task runs when the node is added to the pool or when the node is
restarted.
:type start_task: :class:`StartTask <azure.batch.models.StartTask>`
:param certificate_references: A list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references: list of :class:`CertificateReference
<azure.batch.models.CertificateReference>`
:param application_package_references: The list of application packages to
be installed on each compute node in the pool.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list of str
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list of :class:`UserAccount
<azure.batch.models.UserAccount>`
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azext.batch.models.PackageReferenceBase>`
"""
_validation = {
'vm_size': {'required': True},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration',
'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration',
'type': 'VirtualMachineConfiguration'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None,
virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None,
resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None,
enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None,
enable_inter_node_communication: bool=None, network_configuration=None, start_task=None,
certificate_references=None, application_package_references=None, application_licenses=None,
user_accounts=None, metadata=None, package_references=None, **kwargs) -> None:
super(ExtendedPoolSpecification, self).__init__(
display_name=display_name,
vm_size=vm_size,
cloud_service_configuration=cloud_service_configuration,
virtual_machine_configuration=virtual_machine_configuration,
max_tasks_per_node=max_tasks_per_node,
task_scheduling_policy=task_scheduling_policy,
resize_timeout=resize_timeout,
target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
enable_auto_scale=enable_auto_scale,
auto_scale_formula=auto_scale_formula,
auto_scale_evaluation_interval=auto_scale_evaluation_interval,
enable_inter_node_communication=enable_inter_node_communication,
network_configuration=network_configuration,
start_task=start_task,
certificate_references=certificate_references,
application_package_references=application_package_references,
application_licenses=application_licenses,
user_accounts=user_accounts,
metadata=metadata,
**kwargs)
self.package_references = package_references
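For illustration, a minimal sketch of constructing the typed model above with the new keyword-only signature; the pool name, VM size and package id are hypothetical, and the ChocolateyPackageReference keyword usage is assumed to follow the same convention:

from azext.batch.models import ExtendedPoolSpecification, ChocolateyPackageReference

pool_spec = ExtendedPoolSpecification(
    vm_size='STANDARD_D1_V2',        # the only required argument per _validation
    display_name='example-pool',
    max_tasks_per_node=4,
    target_dedicated_nodes=2,
    package_references=[
        ChocolateyPackageReference(id='ffmpeg'),  # hypothetical package id
    ])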

View file

@@ -42,6 +42,6 @@ class ExtendedResourceFile(ResourceFile):
'source': {'key': 'source', 'type': 'FileSource'}
}
def __init__(self, blob_source=None, file_path=None, file_mode=None, source=None):
super(ExtendedResourceFile, self).__init__(blob_source, file_path, file_mode)
self.source = source
def __init__(self, **kwargs):
super(ExtendedResourceFile, self).__init__(**kwargs)
self.source = kwargs.get('source', None)

View file

@@ -0,0 +1,52 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.batch.models import ResourceFile
class ExtendedResourceFile(ResourceFile):
"""A file to be downloaded from Azure blob storage to a compute node.
:param blob_source: The URL of the file within Azure Blob Storage. This
URL must be readable using anonymous access; that is, the Batch service
does not present any credentials when downloading the blob. There are two
ways to get such a URL for a blob in Azure storage: include a Shared
Access Signature (SAS) granting read permissions on the blob, or set the
ACL for the blob or its container to allow public access.
:type blob_source: str
:param file_path: The location on the compute node to which to download
the file, relative to the task's working directory. If using a file group
source that references more than one file, this will be considered the name
of a directory, otherwise it will be treated as the destination file name.
:type file_path: str
:param file_mode: The file permission mode attribute in octal format. This
property applies only to files being downloaded to Linux compute nodes. It
will be ignored if it is specified for a resourceFile which will be
downloaded to a Windows node. If this property is not specified for a
Linux node, then a default value of 0770 is applied to the file.
If using a file group source that references more than one file, this will be
applied to all files in the group.
:type file_mode: str
:param source: A file source reference which could include a collection of files from
an Azure Storage container or an auto-storage file group.
:type source: :class:`FileSource
<azext.batch.models.FileSource>`
"""
_attribute_map = {
'blob_source': {'key': 'blobSource', 'type': 'str'},
'file_path': {'key': 'filePath', 'type': 'str'},
'file_mode': {'key': 'fileMode', 'type': 'str'},
'source': {'key': 'source', 'type': 'FileSource'}
}
def __init__(self, *, blob_source: str=None, file_path: str=None,
file_mode: str=None, source=None, **kwargs) -> None:
super(ExtendedResourceFile, self).__init__(
blob_source=blob_source,
file_path=file_path,
file_mode=file_mode,
**kwargs)
self.source = source
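A short, hedged example pairing the model above with a FileSource (defined later in this commit); the file group name is hypothetical:

from azext.batch.models import ExtendedResourceFile, FileSource

resource = ExtendedResourceFile(
    file_path='inputs',    # treated as a directory name for multi-file sources
    file_mode='0644',      # applied to every file in the group on Linux nodes
    source=FileSource(file_group='raw-data', prefix='batch01/'))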

View file

@@ -142,26 +142,6 @@ class ExtendedTaskParameter(TaskAddParameter):
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, id, command_line, display_name=None, container_settings=None, exit_conditions=None,
resource_files=None, output_files=None, environment_settings=None,
affinity_info=None, constraints=None, user_identity=None,
multi_instance_settings=None, depends_on=None,
application_package_references=None, authentication_token_settings=None,
package_references=None):
super(ExtendedTaskParameter, self).__init__(
id=id,
display_name=display_name,
command_line=command_line,
container_settings=container_settings,
exit_conditions=exit_conditions,
resource_files=resource_files,
output_files=output_files,
environment_settings=environment_settings,
affinity_info=affinity_info,
constraints=constraints,
user_identity=user_identity,
multi_instance_settings=multi_instance_settings,
depends_on=depends_on,
application_package_references=application_package_references,
authentication_token_settings=authentication_token_settings)
self.package_references = package_references
def __init__(self, **kwargs):
super(ExtendedTaskParameter, self).__init__(**kwargs)
self.package_references = kwargs.get('package_references', None)

View file

@@ -0,0 +1,168 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from azure.batch.models import TaskAddParameter
class ExtendedTaskParameter(TaskAddParameter):
"""An Azure Batch task to add.
:param id: A string that uniquely identifies the task within the job. The
ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
IDs within a job that differ only by case).
:type id: str
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
task runs. If the pool that will run this task has containerConfiguration
set, this must be set as well. If the pool that will run this task doesn't
have containerConfiguration set, this must not be set. When this is
specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR
(the root of Azure Batch directories on the node) are mapped into the
container, all task environment variables are mapped into the container,
and the task command line is executed in the container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ResourceFile
<azure.batch.models.ResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param multi_instance_settings: An object that indicates that the task is
a multi-instance task, and contains information about how to run the
multi-instance task.
:type multi_instance_settings: :class:`MultiInstanceSettings
<azure.batch.models.MultiInstanceSettings>`
:param depends_on: The tasks that this task depends on. This task will not
be scheduled until all tasks that it depends on have completed
successfully. If any of those tasks fail and exhaust their retry counts,
this task will never be scheduled. If the job does not have
usesTaskDependencies set to true, and this element is present, the request
fails with error code TaskDependenciesNotSpecifiedOnJob.
:type depends_on: :class:`TaskDependencies
<azure.batch.models.TaskDependencies>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line. Application packages are downloaded and deployed to a shared
directory, not the task working directory. Therefore, if a referenced
package is already on the compute node, and is up to date, then it is not
re-downloaded; the existing copy on the compute node is used. If a
referenced application package cannot be installed, for example because
the package has been deleted or because download failed, the task fails.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of files that the Batch service will upload
from the compute node after running the command line. For multi-instance
tasks, the files will only be uploaded from the compute node on which the
primary task is executed.
:type output_files: list of :class:`OutputFile
<azext.batch.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azext.batch.models.PackageReferenceBase>`
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings',
'type': 'AuthenticationTokenSettings'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None,
exit_conditions=None, resource_files=None, output_files=None, environment_settings=None,
affinity_info=None, constraints=None, user_identity=None,
multi_instance_settings=None, depends_on=None,
application_package_references=None, authentication_token_settings=None,
package_references=None, **kwargs) -> None:
super(ExtendedTaskParameter, self).__init__(
id=id,
display_name=display_name,
command_line=command_line,
container_settings=container_settings,
exit_conditions=exit_conditions,
resource_files=resource_files,
output_files=output_files,
environment_settings=environment_settings,
affinity_info=affinity_info,
constraints=constraints,
user_identity=user_identity,
multi_instance_settings=multi_instance_settings,
depends_on=depends_on,
application_package_references=application_package_references,
authentication_token_settings=authentication_token_settings,
**kwargs)
self.package_references = package_references
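A minimal sketch of the keyword-only construction above, with a hypothetical task id and an assumed AptPackageReference keyword signature:

from azext.batch.models import ExtendedTaskParameter, AptPackageReference

task = ExtendedTaskParameter(
    id='task-001',                                     # required
    command_line='/bin/sh -c "python3 process.py"',    # required; invokes a shell explicitly
    package_references=[AptPackageReference(id='python3')])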

View file

@@ -33,8 +33,8 @@ class FileCollectionTaskFactory(TaskFactoryBase):
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, source, repeat_task, merge_task=None):
super(FileCollectionTaskFactory, self).__init__(merge_task)
self.source = source
self.repeat_task = repeat_task
def __init__(self, **kwargs):
super(FileCollectionTaskFactory, self).__init__(**kwargs)
self.source = kwargs.get('source', None)
self.repeat_task = kwargs.get('repeat_task', None)
self.type = 'taskPerFile'

View file

@@ -0,0 +1,41 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class FileCollectionTaskFactory(TaskFactoryBase):
"""A Task Factory for generating a set of tasks based on the contents
of an Azure Storage container or auto-storage file group. One task
will be generated per input file, and automatically added to the job.
:param source: The input file source from which the tasks will be generated.
:type source: :class:`FileSource <azext.batch.models.FileSource>`
:param repeat_task: The task template that will be used to generate each task.
:type repeat_task: :class:`RepeatTask <azext.batch.models.RepeatTask>`
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azext.batch.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
'source': {'required': True},
'repeat_task': {'required': True}
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'source': {'key': 'source', 'type': 'FileSource'},
'repeat_task': {'key': 'repeatTask', 'type': 'RepeatTask'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, *, source, repeat_task, merge_task=None, **kwargs) -> None:
super(FileCollectionTaskFactory, self).__init__(
merge_task=merge_task, **kwargs)
self.source = source
self.repeat_task = repeat_task
self.type = 'taskPerFile'
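For illustration, a hedged sketch of the factory above; it assumes RepeatTask takes a keyword-only command_line and that per-file substitution tokens such as {fileName} are expanded into each generated task:

from azext.batch.models import FileCollectionTaskFactory, FileSource, RepeatTask

factory = FileCollectionTaskFactory(
    source=FileSource(file_group='raw-images'),   # hypothetical file group
    repeat_task=RepeatTask(command_line='/bin/sh -c "convert {fileName} out.png"'))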

View file

@@ -24,8 +24,9 @@ class FileSource(Model):
'prefix': {'key': 'prefix', 'type': 'str'},
}
def __init__(self, file_group=None, url=None, container_url=None, prefix=None):
self.file_group = file_group
self.url = url
self.container_url = container_url
self.prefix = prefix
def __init__(self, **kwargs):
super(FileSource, self).__init__(**kwargs)
self.file_group = kwargs.get('file_group', None)
self.url = kwargs.get('url', None)
self.container_url = kwargs.get('container_url', None)
self.prefix = kwargs.get('prefix', None)

View file

@@ -0,0 +1,33 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class FileSource(Model):
"""A source of input files to be downloaded onto a compute node.
:param str file_group: The name of an auto-storage file group.
:param str url: The URL of a file to be downloaded.
:param str container_url: The SAS URL of an Azure Storage container.
:param str prefix: The filename prefix or subdirectory of input files
in either an auto-storage file group or container. Will be ignored if
combined with url.
"""
_attribute_map = {
'file_group': {'key': 'fileGroup', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'container_url': {'key': 'containerUrl', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
}
def __init__(self, *, file_group: str=None, url: str=None,
container_url: str=None, prefix: str=None, **kwargs) -> None:
super(FileSource, self).__init__(**kwargs)
self.file_group = file_group
self.url = url
self.container_url = container_url
self.prefix = prefix
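A small sketch of the two source styles called out in the docstring; per the note above, prefix applies to file groups and containers but is ignored when combined with url:

from azext.batch.models import FileSource

by_group = FileSource(file_group='raw-data', prefix='2018-08/')
by_url = FileSource(url='https://example.com/data/input.csv')  # prefix would be ignored here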

View file

@@ -149,22 +149,19 @@ class JobManagerTask(Model):
'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'},
}
def __init__(self, id, command_line, display_name=None, container_settings=None, resource_files=None,
output_files=None, environment_settings=None, constraints=None, kill_job_on_completion=None,
user_identity=None, run_exclusive=None, application_package_references=None,
authentication_token_settings=None, allow_low_priority_node=None):
super(JobManagerTask, self).__init__()
self.id = id
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.constraints = constraints
self.kill_job_on_completion = kill_job_on_completion
self.user_identity = user_identity
self.run_exclusive = run_exclusive
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.allow_low_priority_node = allow_low_priority_node
def __init__(self, **kwargs):
super(JobManagerTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.display_name = kwargs.get('display_name', None)
self.command_line = kwargs.get('command_line', None)
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.output_files = kwargs.get('output_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.constraints = kwargs.get('constraints', None)
self.kill_job_on_completion = kwargs.get('kill_job_on_completion', None)
self.user_identity = kwargs.get('user_identity', None)
self.run_exclusive = kwargs.get('run_exclusive', None)
self.application_package_references = kwargs.get('application_package_references', None)
self.authentication_token_settings = kwargs.get('authentication_token_settings', None)
self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None)

View file

@@ -0,0 +1,171 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from msrest.serialization import Model
class JobManagerTask(Model):
"""Specifies details of a Job Manager task.
The Job Manager task is automatically started when the job is created. The
Batch service tries to schedule the Job Manager task before any other tasks
in the job. When shrinking a pool, the Batch service tries to preserve
compute nodes where Job Manager tasks are running for as long as possible
(that is, nodes running 'normal' tasks are removed before nodes running Job
Manager tasks). When a Job Manager task fails and needs to be restarted,
the system tries to schedule it at the highest priority. If there are no
idle nodes available, the system may terminate one of the running tasks in
the pool and return it to the queue in order to make room for the Job
Manager task to restart. Note that a Job Manager task in one job does not
have priority over tasks in other jobs. Across jobs, only job level
priorities are observed. For example, if a Job Manager in a priority 0 job
needs to be restarted, it will not displace tasks of a priority 1 job.
:param id: A string that uniquely identifies the Job Manager task within
the job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters.
:type id: str
:param display_name: The display name of the Job Manager task. It need not
be unique and can contain any Unicode characters up to a maximum length of
1024.
:type display_name: str
:param command_line: The command line of the Job Manager task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
Job Manager task runs. If the pool that will run this task has
containerConfiguration set, this must be set as well. If the pool that
will run this task doesn't have containerConfiguration set, this must not
be set. When this is specified, all directories recursively below the
AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node)
are mapped into the container, all task environment variables are mapped
into the container, and the task command line is executed in the
container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param output_files: A list of files that the Batch service will upload
from the compute node after running the command line. For multi-instance
tasks, the files will only be uploaded from the compute node on which the
primary task is executed.
:type output_files: list of :class:`OutputFile
<azure.batch.models.OutputFile>`
:param environment_settings: A list of environment variable settings for
the Job Manager task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param constraints: Constraints that apply to the Job Manager task.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param kill_job_on_completion: Whether completion of the Job Manager task
signifies completion of the entire job. If true, when the Job Manager task
completes, the Batch service marks the job as complete. If any tasks are
still running at this time (other than Job Release), those tasks are
terminated. If false, the completion of the Job Manager task does not
affect the job status. In this case, you should either use the
onAllTasksComplete attribute to terminate the job, or have a client or
user terminate the job explicitly. An example of this is if the Job
Manager creates a set of tasks but then takes no further role in their
execution. The default value is true. If you are using the
onAllTasksComplete and onTaskFailure attributes to control job lifetime,
and using the Job Manager task only to create the tasks for the job (not
to monitor progress), then it is important to set killJobOnCompletion to
false.
:type kill_job_on_completion: bool
:param user_identity: The user identity under which the Job Manager task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param run_exclusive: Whether the Job Manager task requires exclusive use
of the compute node where it runs. If true, no other tasks will run on the
same compute node for as long as the Job Manager is running. If false,
other tasks can run simultaneously with the Job Manager on a compute node.
The Job Manager task counts normally against the node's concurrent task
limit, so this is only relevant if the node allows multiple concurrent
tasks. The default value is true.
:type run_exclusive: bool
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line. Application packages are downloaded and deployed to a shared
directory, not the task working directory. Therefore, if a referenced
package is already on the compute node, and is up to date, then it is not
re-downloaded; the existing copy on the compute node is used. If a
referenced application package cannot be installed, for example because
the package has been deleted or because download failed, the task fails.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param allow_low_priority_node: Whether the Job Manager task may run on a
low-priority compute node. The default value is false.
:type allow_low_priority_node: bool
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'run_exclusive': {'key': 'runExclusive', 'type': 'bool'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings',
'type': 'AuthenticationTokenSettings'},
'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'},
}
def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None,
resource_files=None, output_files=None, environment_settings=None, constraints=None,
kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None,
application_package_references=None, authentication_token_settings=None,
allow_low_priority_node: bool=None, **kwargs) -> None:
super(JobManagerTask, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.constraints = constraints
self.kill_job_on_completion = kill_job_on_completion
self.user_identity = user_identity
self.run_exclusive = run_exclusive
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.allow_low_priority_node = allow_low_priority_node
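A hedged sketch of a Job Manager task using the signature above; the values are hypothetical and follow the killJobOnCompletion guidance in the docstring (a manager that only creates tasks should not end the job):

from azext.batch.models import JobManagerTask

job_manager = JobManagerTask(
    id='jobmanager',
    command_line='/bin/sh -c "python3 submit_tasks.py"',
    kill_job_on_completion=False,   # job lifetime controlled via onAllTasksComplete instead
    run_exclusive=False,
    allow_low_priority_node=True)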

View file

@@ -114,16 +114,14 @@ class JobPreparationTask(Model):
'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'},
}
def __init__(self, command_line, id=None, container_settings=None, resource_files=None,
environment_settings=None, constraints=None, wait_for_success=None, user_identity=None,
rerun_on_node_reboot_after_success=None):
super(JobPreparationTask, self).__init__()
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.constraints = constraints
self.wait_for_success = wait_for_success
self.user_identity = user_identity
self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success
def __init__(self, **kwargs):
super(JobPreparationTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.command_line = kwargs.get('command_line', None)
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.constraints = kwargs.get('constraints', None)
self.wait_for_success = kwargs.get('wait_for_success', None)
self.user_identity = kwargs.get('user_identity', None)
self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None)

View file

@@ -0,0 +1,129 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from msrest.serialization import Model
class JobPreparationTask(Model):
"""A Job Preparation task to run before any tasks of the job on any given
compute node.
You can use Job Preparation to prepare a compute node to run tasks for the
job. Activities commonly performed in Job Preparation include: Downloading
common resource files used by all the tasks in the job. The Job Preparation
task can download these common resource files to the shared location on the
compute node (AZ_BATCH_NODE_ROOT_DIR\\shared); or starting a local service
on the compute node so that all tasks of that job can communicate with it.
If the Job Preparation task fails (that is, exhausts its retry count before
exiting with exit code 0), Batch will not run tasks of this job on the
compute node. The node remains ineligible to run tasks of this job until it
is reimaged. The node remains active and can be used for other jobs. The
Job Preparation task can run multiple times on the same compute node.
Therefore, you should write the Job Preparation task to handle
re-execution. If the compute node is rebooted, the Job Preparation task is
run again on the node before scheduling any other task of the job, if
rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did
not previously complete. If the compute node is reimaged, the Job
Preparation task is run again before scheduling any task of the job.
:param id: A string that uniquely identifies the Job Preparation task
within the job. The ID can contain any combination of alphanumeric
characters including hyphens and underscores and cannot contain more than
64 characters. If you do not specify this property, the Batch service
assigns a default value of 'jobpreparation'. No other task in the job can
have the same ID as the Job Preparation task. If you try to submit a task
with the same ID, the Batch service rejects the request with error code
TaskIdSameAsJobPreparationTask; if you are calling the REST API directly,
the HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Preparation task. The
command line does not run under a shell, and therefore cannot take
advantage of shell features such as environment variable expansion. If you
want to take advantage of such features, you should invoke the shell in
the command line, for example using "cmd /c MyCommand" in Windows or
"/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
Job Preparation task runs. When this is specified, all directories
recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch
directories on the node) are mapped into the container, all task
environment variables are mapped into the container, and the task command
line is executed in the container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the Job Preparation task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param constraints: Constraints that apply to the Job Preparation task.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param wait_for_success: Whether the Batch service should wait for the Job
Preparation task to complete successfully before scheduling any other
tasks of the job on the compute node. A Job Preparation task has completed
successfully if it exits with exit code 0. If true and the Job Preparation
task fails on a compute node, the Batch service retries the Job
Preparation task up to its maximum retry count (as specified in the
constraints element). If the task has still not completed successfully
after all retries, then the Batch service will not schedule tasks of the
job to the compute node. The compute node remains active and eligible to
run tasks of other jobs. If false, the Batch service will not wait for the
Job Preparation task to complete. In this case, other tasks of the job can
start executing on the compute node while the Job Preparation task is
still running; and even if the Job Preparation task fails, new tasks will
continue to be scheduled on the node. The default value is true.
:type wait_for_success: bool
:param user_identity: The user identity under which the Job Preparation
task runs. If omitted, the task runs as a non-administrative user unique
to the task on Windows nodes, or a non-administrative user unique to the
pool on Linux nodes.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param rerun_on_node_reboot_after_success: Whether the Batch service
should rerun the Job Preparation task after a compute node reboots. The
Job Preparation task is always rerun if a compute node is reimaged, or if
the Job Preparation task did not complete (e.g. because the reboot
occurred while the task was running). Therefore, you should always write a
Job Preparation task to be idempotent and to behave correctly if run
multiple times. The default value is true.
:type rerun_on_node_reboot_after_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'},
}
def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None,
environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None,
rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None:
super(JobPreparationTask, self).__init__(**kwargs)
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.constraints = constraints
self.wait_for_success = wait_for_success
self.user_identity = user_identity
self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success
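A minimal sketch under the same keyword-only convention; the command line is hypothetical:

from azext.batch.models import JobPreparationTask

prep = JobPreparationTask(
    command_line='/bin/sh -c "./install_deps.sh"',
    wait_for_success=True,                     # block task scheduling until it exits 0
    rerun_on_node_reboot_after_success=True)   # assumes the script is idempotent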

View file

@@ -99,14 +99,13 @@ class JobReleaseTask(Model):
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, command_line, id=None, container_settings=None, resource_files=None,
environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None):
super(JobReleaseTask, self).__init__()
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.user_identity = user_identity
def __init__(self, **kwargs):
super(JobReleaseTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.command_line = kwargs.get('command_line', None)
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None)
self.retention_time = kwargs.get('retention_time', None)
self.user_identity = kwargs.get('user_identity', None)

View file

@@ -0,0 +1,113 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from msrest.serialization import Model
class JobReleaseTask(Model):
"""A Job Release task to run on job completion on any compute node where the
job has run.
The Job Release task runs when the job ends, because of one of the
following: The user calls the Terminate Job API, or the Delete Job API
while the job is still active, the job's maximum wall clock time constraint
is reached, and the job is still active, or the job's Job Manager task
completed, and the job is configured to terminate when the Job Manager
completes. The Job Release task runs on each compute node where tasks of
the job have run and the Job Preparation task ran and completed. If you
reimage a compute node after it has run the Job Preparation task, and the
job ends without any further tasks of the job running on that compute node
(and hence the Job Preparation task does not re-run), then the Job Release
task does not run on that node. If a compute node reboots while the Job
Release task is still running, the Job Release task runs again when the
compute node starts up. The job is not marked as complete until all Job
Release tasks have completed. The Job Release task runs in the background.
It does not occupy a scheduling slot; that is, it does not count towards
the maxTasksPerNode limit specified on the pool.
:param id: A string that uniquely identifies the Job Release task within
the job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters. If you do not specify this property, the Batch service assigns
a default value of 'jobrelease'. No other task in the job can have the
same ID as the Job Release task. If you try to submit a task with the same
ID, the Batch service rejects the request with error code
TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the
HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Release task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
Job Release task runs. When this is specified, all directories recursively
below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on
the node) are mapped into the container, all task environment variables
are mapped into the container, and the task command line is executed in
the container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the Job Release task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param max_wall_clock_time: The maximum elapsed time that the Job Release
task may run on a given compute node, measured from the time the task
starts. If the task does not complete within the time limit, the Batch
service terminates it. The default value is 15 minutes. You may not
specify a timeout longer than 15 minutes. If you do, the Batch service
rejects it with an error; if you are calling the REST API directly, the
HTTP status code is 400 (Bad Request).
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the task directory for
the Job Release task on the compute node. After this time, the Batch
service may delete the task directory and all its contents. The default is
infinite, i.e. the task directory will be retained until the compute node
is removed or reimaged.
:type retention_time: timedelta
:param user_identity: The user identity under which the Job Release task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None,
environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None,
**kwargs) -> None:
super(JobReleaseTask, self).__init__(**kwargs)
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.user_identity = user_identity
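A hedged sketch showing the timedelta-typed constraints documented above; the 10-minute limit respects the documented 15-minute maximum:

from datetime import timedelta
from azext.batch.models import JobReleaseTask

release = JobReleaseTask(
    command_line='/bin/sh -c "rm -rf $AZ_BATCH_TASK_WORKING_DIR/tmp"',
    max_wall_clock_time=timedelta(minutes=10),
    retention_time=timedelta(hours=1))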

View file

@@ -31,6 +31,7 @@ class JobTemplate(Model):
type = "Microsoft.Batch/batchAccounts/jobs"
def __init__(self, properties, api_version=None):
self.properties = properties
self.api_version = api_version
def __init__(self, **kwargs):
super(JobTemplate, self).__init__(**kwargs)
self.properties = kwargs.get('properties')
self.api_version = kwargs.get('api_version', None)

View file

@@ -0,0 +1,37 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class JobTemplate(Model):
"""A Job Template.
:ivar type: The type of object described by the template. Must be:
"Microsoft.Batch/batchAccounts/jobs"
:type type: str
:param api_version: The API version that the template conforms to.
:type api_version: str
:param properties: The specification of the job.
:type properties: :class:`ExtendedJobParameter <azext.batch.models.ExtendedJobParameter>`
"""
_validation = {
'type': {'required': True, 'constant': True},
'properties': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'api_version': {'key': 'apiVersion', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ExtendedJobParameter'},
}
type = "Microsoft.Batch/batchAccounts/jobs"
def __init__(self, *, properties, api_version: str=None, **kwargs) -> None:
super(JobTemplate, self).__init__(**kwargs)
self.properties = properties
self.api_version = api_version
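A minimal sketch of building a job template, assuming ExtendedJobParameter (defined elsewhere in this commit) mirrors JobAddParameter in requiring id and pool_info; the pool id and API version string are hypothetical:

from azure.batch.models import PoolInformation
from azext.batch.models import JobTemplate, ExtendedJobParameter

template = JobTemplate(
    api_version='2018-08-01.7.0',
    properties=ExtendedJobParameter(
        id='template-job',
        pool_info=PoolInformation(pool_id='example-pool')))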

View file

@@ -112,21 +112,19 @@ class MergeTask(Model):
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'},
}
def __init__(self, command_line, id=None, display_name=None, exit_conditions=None,
resource_files=None, environment_settings=None, affinity_info=None, constraints=None,
user_identity=None, depends_on=None, application_package_references=None,
authentication_token_settings=None, output_files=None, package_references=None):
self.id = id
self.display_name = display_name
self.command_line = command_line
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references
def __init__(self, **kwargs):
super(MergeTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.display_name = kwargs.get('display_name', None)
self.command_line = kwargs.get('command_line')
self.exit_conditions = kwargs.get('exit_conditions', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.affinity_info = kwargs.get('affinity_info', None)
self.constraints = kwargs.get('constraints', None)
self.user_identity = kwargs.get('user_identity', None)
self.depends_on = kwargs.get('depends_on', None)
self.application_package_references = kwargs.get('application_package_references', None)
self.authentication_token_settings = kwargs.get('authentication_token_settings', None)
self.output_files = kwargs.get('output_files', None)
self.package_references = kwargs.get('package_references', None)

View file

@ -0,0 +1,133 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from msrest.serialization import Model
class MergeTask(Model):
"""An Azure Batch task template to repeat.
:param str id: The ID of the merge task.
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param depends_on: The tasks that this task depends on. This task will not
be scheduled until all tasks that it depends on have completed
successfully. If any of those tasks fail and exhaust their retry counts,
this task will never be scheduled. If the job does not have
usesTaskDependencies set to true, and this element is present, the request
fails with error code TaskDependenciesNotSpecifiedOnJob.
:type depends_on: :class:`TaskDependencies
<azure.batch.models.TaskDependencies>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of output files to be persisted once
the task has completed.
:type output_files: list of :class:`OutputFile
<azext.batch.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azext.batch.models.PackageReferenceBase>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings',
'type': 'AuthenticationTokenSettings'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'},
}
def __init__(self, *, command_line: str, id: str=None, display_name: str=None, exit_conditions=None,
resource_files=None, environment_settings=None, affinity_info=None, constraints=None,
user_identity=None, depends_on=None, application_package_references=None,
authentication_token_settings=None, output_files=None, package_references=None, **kwargs) -> None:
super(MergeTask, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.command_line = command_line
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references
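
A sketch of a merge task that gathers the outputs of the generated tasks (the command line is a placeholder):

from azext.batch import models

merge = models.MergeTask(
    id='merge',
    command_line='/bin/sh -c "cat out-*.txt > merged.txt"')  # placeholder command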

View file

@ -41,8 +41,8 @@ class MultiInstanceSettings(Model):
'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ExtendedResourceFile]'},
}
def __init__(self, coordination_command_line, number_of_instances=None, common_resource_files=None):
super(MultiInstanceSettings, self).__init__()
self.number_of_instances = number_of_instances
self.coordination_command_line = coordination_command_line
self.common_resource_files = common_resource_files
def __init__(self, **kwargs):
super(MultiInstanceSettings, self).__init__(**kwargs)
self.number_of_instances = kwargs.get('number_of_instances', None)
self.coordination_command_line = kwargs.get('coordination_command_line')
self.common_resource_files = kwargs.get('common_resource_files', None)

View file

@ -0,0 +1,49 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class MultiInstanceSettings(Model):
"""Settings which specify how to run a multi-instance task.
Multi-instance tasks are commonly used to support MPI tasks.
:param number_of_instances: The number of compute nodes required by the
task. If omitted, the default is 1.
:type number_of_instances: int
:param coordination_command_line: The command line to run on all the
compute nodes to enable them to coordinate when the primary runs the main
task command. A typical coordination command line launches a background
service and verifies that the service is ready to process inter-node
messages.
:type coordination_command_line: str
:param common_resource_files: A list of files that the Batch service will
download before running the coordination command line. The difference
between common resource files and task resource files is that common
resource files are downloaded for all subtasks including the primary,
whereas task resource files are downloaded only for the primary. Also note
that these resource files are not downloaded to the task working
directory, but instead are downloaded to the task root directory (one
directory above the working directory).
:type common_resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
"""
_validation = {
'coordination_command_line': {'required': True},
}
_attribute_map = {
'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'},
'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'},
'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ExtendedResourceFile]'},
}
def __init__(self, *, coordination_command_line: str, number_of_instances: int=None,
common_resource_files=None, **kwargs) -> None:
super(MultiInstanceSettings, self).__init__(**kwargs)
self.number_of_instances = number_of_instances
self.coordination_command_line = coordination_command_line
self.common_resource_files = common_resource_files
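
A hedged example of an MPI-style multi-instance configuration (the coordination command and file group name are illustrative):

from azext.batch import models

mpi = models.MultiInstanceSettings(
    coordination_command_line='/bin/sh -c "service smpd start"',  # illustrative
    number_of_instances=4,
    common_resource_files=[models.ExtendedResourceFile(
        source=models.FileSource(file_group='mpi-data'))])        # hypothetical file group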

View file

@ -49,8 +49,8 @@ class OutputFile(Model):
'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'},
}
def __init__(self, file_pattern, destination, upload_options):
super(OutputFile, self).__init__()
self.file_pattern = file_pattern
self.destination = destination
self.upload_options = upload_options
def __init__(self, **kwargs):
super(OutputFile, self).__init__(**kwargs)
self.file_pattern = kwargs.get('file_pattern')
self.destination = kwargs.get('destination')
self.upload_options = kwargs.get('upload_options')

View file

@ -27,6 +27,7 @@ class OutputFileAutoStorageDestination(Model):
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, file_group, path=None):
self.file_group = file_group
self.path = path
def __init__(self, **kwargs):
super(OutputFileAutoStorageDestination, self).__init__(**kwargs)
self.file_group = kwargs.get('file_group')
self.path = kwargs.get('path', None)

View file

@ -0,0 +1,33 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class OutputFileAutoStorageDestination(Model):
"""An speficition of output files upload destination that uses an
auto-storage file group.
:param str file_group: The name of the file group that the output files will
be uploaded to.
:param str path: The destination path within the file group that the files will
be uploaded to. If the output file specification refers to a single file, this will
be treated as a file name. If the output file specification refers to potentially
multiple files, this will be treated as a subfolder.
"""
_validation = {
'file_group': {'required': True}
}
_attribute_map = {
'file_group': {'key': 'fileGroup', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, *, file_group: str, path: str=None, **kwargs) -> None:
super(OutputFileAutoStorageDestination, self).__init__(**kwargs)
self.file_group = file_group
self.path = path
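
For example, routing a single log file into a subfolder of a file group (both names are assumed):

from azext.batch import models

dest = models.OutputFileAutoStorageDestination(
    file_group='job-outputs',  # hypothetical file group
    path='logs/stdout.txt')    # single-file pattern, so treated as a file name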

View file

@ -0,0 +1,56 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class OutputFile(Model):
"""A specification for uploading files from an Azure Batch node to another
location after the Batch service has finished executing the task process.
:param file_pattern: A pattern indicating which file(s) to upload. Both
relative and absolute paths are supported. Relative paths are relative to
the task working directory. The following wildcards are supported: *
matches 0 or more characters (for example pattern abc* would match abc or
abcdef), ** matches any directory, ? matches any single character, [abc]
matches one character in the brackets, and [a-c] matches one character in
the range. Brackets can include a negation to match any character not
specified (for example [!abc] matches any character but a, b, or c). If a
file name starts with "." it is ignored by default but may be matched by
specifying it explicitly (for example *.gif will not match .a.gif, but
.*.gif will). A simple example: **\\*.txt matches any file that does not
start in '.' and ends with .txt in the task working directory or any
subdirectory. If the filename contains a wildcard character it can be
escaped using brackets (for example abc[*] would match a file named abc*).
Note that both \\ and / are treated as directory separators on Windows,
but only / is on Linux. Environment variables (%var% on Windows or $var on
Linux) are expanded prior to the pattern being applied.
:type file_pattern: str
:param destination: The destination for the output file(s).
:type destination: :class:`ExtendedOutputFileDestination
<azext.batch.models.ExtendedOutputFileDestination>`
:param upload_options: Additional options for the upload operation,
including under what conditions to perform the upload.
:type upload_options: :class:`OutputFileUploadOptions
<azure.batch.models.OutputFileUploadOptions>`
"""
_validation = {
'file_pattern': {'required': True},
'destination': {'required': True},
'upload_options': {'required': True},
}
_attribute_map = {
'file_pattern': {'key': 'filePattern', 'type': 'str'},
'destination': {'key': 'destination', 'type': 'ExtendedOutputFileDestination'},
'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'},
}
def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None:
super(OutputFile, self).__init__(**kwargs)
self.file_pattern = file_pattern
self.destination = destination
self.upload_options = upload_options
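
Putting the pieces together, a sketch of a complete output file specification (the pattern and file group are illustrative):

from azext.batch import models

output = models.OutputFile(
    file_pattern='**/*.txt',  # every .txt under the task working directory
    destination=models.ExtendedOutputFileDestination(
        auto_storage=models.OutputFileAutoStorageDestination(file_group='job-outputs')),
    upload_options=models.OutputFileUploadOptions(
        upload_condition=models.OutputFileUploadCondition.task_completion))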

View file

@ -34,7 +34,8 @@ class PackageReferenceBase(Model):
'yumPackage': 'YumPackageReference'}
}
def __init__(self, id, version=None):
def __init__(self, **kwargs):
super(PackageReferenceBase, self).__init__(**kwargs)
self.type = None
self.id = id
self.version = version
self.id = kwargs.get('id')
self.version = kwargs.get('version', None)

View file

@ -0,0 +1,41 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from msrest.serialization import Model
class PackageReferenceBase(Model):
"""A reference to a package to be installed on the compute nodes using
a package manager.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
_subtype_map = {
'type': {'aptPackage': 'AptPackageReference',
'chocolateyPackage': 'ChocolateyPackageReference',
'yumPackage': 'YumPackageReference'}
}
def __init__(self, *, id: str, version: str=None, **kwargs) -> None:
super(PackageReferenceBase, self).__init__(**kwargs)
self.type = None
self.id = id
self.version = version
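
Because the type discriminator is filled in by the subclasses, package references are constructed through them rather than through this base class; for instance (package names and the pinned version are assumptions):

from azext.batch import models

packages = [
    models.AptPackageReference(id='ffmpeg'),                        # latest version
    models.ChocolateyPackageReference(id='git', version='2.18.0')]  # pinned version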

View file

@ -27,18 +27,19 @@ class ParameterSet(Model):
'step': {'key': 'step', 'type': 'int'},
}
def __init__(self, start, end, step=1):
def __init__(self, **kwargs):
super(ParameterSet, self).__init__(**kwargs)
try:
self.start = int(start)
self.end = int(end)
self.step = int(step)
self.start = int(kwargs.get('start'))
self.end = int(kwargs.get('end'))
self.step = int(kwargs.get('step', 1))
except (TypeError, ValueError):
raise ValueError("'start', 'end' and 'step' parameters must be integers.")
if step == 0:
if self.step == 0:
raise ValueError("'step' parameter cannot be 0.")
elif start > end and step > 0:
elif self.start > self.end and self.step > 0:
raise ValueError(
"'step' must be a negative number when 'start' is greater than 'end'")
elif start < end and step < 0:
elif self.start < self.end and self.step < 0:
raise ValueError(
"'step' must be a positive number when 'end' is greater than 'start'")

View file

@ -0,0 +1,45 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ParameterSet(Model):
"""A set of parametric sweep range range parameters.
:param int start: The starting value of the sweep.
:param int end: The ending value of the sweep (inclusive).
:param int step: The incremental step value, default is 1. The step value
can be negative (i.e. a descending sweep), but only if the start value is
higher than the end value.
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
'step': {'key': 'step', 'type': 'int'},
}
def __init__(self, *, start: int, end: int, step: int=1, **kwargs) -> None:
super(ParameterSet, self).__init__(**kwargs)
try:
self.start = int(start)
self.end = int(end)
self.step = int(step)
except (TypeError, ValueError):
raise ValueError("'start', 'end' and 'step' parameters must be integers.")
if self.step == 0:
raise ValueError("'step' parameter cannot be 0.")
elif self.start > self.end and self.step > 0:
raise ValueError(
"'step' must be a negative number when 'start' is greater than 'end'")
elif self.start < self.end and self.step < 0:
raise ValueError(
"'step' must be a positive number when 'end' is greater than 'start'")

View file

@ -34,10 +34,10 @@ class ParametricSweepTaskFactory(TaskFactoryBase):
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, parameter_sets, repeat_task, merge_task=None):
super(ParametricSweepTaskFactory, self).__init__(merge_task)
if not parameter_sets:
raise ValueError("Parametric Sweep task factory requires at least one parameter set.")
self.parameter_sets = parameter_sets
self.repeat_task = repeat_task
def __init__(self, **kwargs):
super(ParametricSweepTaskFactory, self).__init__(**kwargs)
self.parameter_sets = kwargs.get('parameter_sets', None)
self.repeat_task = kwargs.get('repeat_task', None)
self.type = 'parametricSweep'
if not self.parameter_sets:
raise ValueError("Parametric Sweep task factory requires at least one parameter set.")

View file

@ -0,0 +1,43 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class ParametricSweepTaskFactory(TaskFactoryBase):
"""A Task Factory for generating a set of tasks based on one or more parameter
sets to define a numeric input range. Each parameter set will have a start, end
and step value. A task will be generated for each integer in this range. Multiple
parameter sets can be combined for a multi-dimensional sweep.
:param parameter_sets: A list of parameter sets from which tasks will be generated.
:type parameter_sets: A list of :class:`ParameterSet<azext.batch.models.ParameterSet>`
:param repeat_task: The task template that will be used to generate each task.
:type repeat_task: :class:`RepeatTask <azext.batch.models.RepeatTask>`
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azext.batch.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
'parameter_sets': {'required': True, 'min_items': 1},
'repeat_task': {'required': True}
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'parameter_sets': {'key': 'parameterSets', 'type': '[ParameterSet]'},
'repeat_task': {'key': 'repeatTask', 'type': 'RepeatTask'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
def __init__(self, *, parameter_sets, repeat_task, merge_task=None, **kwargs) -> None:
super(ParametricSweepTaskFactory, self).__init__(merge_task=merge_task, **kwargs)
if not parameter_sets:
raise ValueError("Parametric Sweep task factory requires at least one parameter set.")
self.parameter_sets = parameter_sets
self.repeat_task = repeat_task
self.type = 'parametricSweep'
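
A sketch of a two-dimensional sweep; each generated task substitutes its values into the {0} and {1} placeholders (the script name is hypothetical):

from azext.batch import models

factory = models.ParametricSweepTaskFactory(
    parameter_sets=[
        models.ParameterSet(start=1, end=3),
        models.ParameterSet(start=10, end=30, step=10)],
    repeat_task=models.RepeatTask(
        command_line='/bin/sh -c "process.sh --row {0} --col {1}"'))  # hypothetical script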

View file

@ -31,6 +31,7 @@ class PoolTemplate(Model):
type = "Microsoft.Batch/batchAccounts/pools"
def __init__(self, properties, api_version=None):
self.properties = properties
self.api_version = api_version
def __init__(self, **kwargs):
super(PoolTemplate, self).__init__(**kwargs)
self.properties = kwargs.get('properties')
self.api_version = kwargs.get('api_version', None)

View file

@ -0,0 +1,37 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PoolTemplate(Model):
"""A Pool Template.
:ivar type: The type of object described by the template. Must be:
"Microsoft.Batch/batchAccounts/pools"
:type type: str
:param api_version: The API version that the template conforms to.
:type api_version: str
:param properties: The specification of the pool.
:type properties: :class:`ExtendedPoolParameter<azext.batch.models.ExtendedPoolParameter>`
"""
_validation = {
'type': {'required': True, 'constant': True},
'properties': {'required': True},
}
_attribute_map = {
'type': {'key': 'id', 'type': 'str'},
'api_version': {'key': 'apiVersion', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ExtendedPoolParameter'},
}
type = "Microsoft.Batch/batchAccounts/pools"
def __init__(self, *, properties, api_version: str=None, **kwargs) -> None:
super(PoolTemplate, self).__init__(**kwargs)
self.properties = properties
self.api_version = api_version

View file

@ -101,20 +101,17 @@ class RepeatTask(Model):
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, command_line, display_name=None, container_settings=None, exit_conditions=None,
resource_files=None, environment_settings=None, affinity_info=None, constraints=None,
user_identity=None, application_package_references=None, authentication_token_settings=None,
output_files=None, package_references=None):
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references
def __init__(self, **kwargs):
super(RepeatTask, self).__init__(**kwargs)
self.display_name = kwargs.get('display_name', None)
self.command_line = kwargs.get('command_line')
self.container_settings = kwargs.get('container_settings', None)
self.exit_conditions = kwargs.get('exit_conditions', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.affinity_info = kwargs.get('affinity_info', None)
self.constraints = kwargs.get('constraints', None)
self.user_identity = kwargs.get('user_identity', None)
self.application_package_references = kwargs.get('application_package_references', None)
self.authentication_token_settings = kwargs.get('authentication_token_settings', None)
self.output_files = kwargs.get('output_files', None)
self.package_references = kwargs.get('package_references', None)

View file

@ -0,0 +1,121 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class RepeatTask(Model):
"""An Azure Batch task template to repeat.
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task.
:type command_line: str
:param container_settings: The settings for the container under which the
task runs. If the pool that will run this task has containerConfiguration
set, this must be set as well. If the pool that will run this task doesn't
have containerConfiguration set, this must not be set. When this is
specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR
(the root of Azure Batch directories on the node) are mapped into the
container, all task environment variables are mapped into the container,
and the task command line is executed in the container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings: :class:`AuthenticationTokenSettings
<azure.batch.models.AuthenticationTokenSettings>`
:param output_files: A list of output files to be persisted once
the task has completed.
:type output_files: list of :class:`OutputFile
<azext.batch.models.OutputFile>`
:param package_references: A list of packages to be installed on the compute
nodes. Must be of a Package Manager type in accordance with the selected
operating system.
:type package_references: list of :class:`PackageReferenceBase
<azext.batch.models.PackageReferenceBase>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'application_package_references': {'key': 'applicationPackageReferences',
'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings',
'type': 'AuthenticationTokenSettings'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}
}
def __init__(self, *, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None,
resource_files=None, environment_settings=None, affinity_info=None, constraints=None,
user_identity=None, application_package_references=None, authentication_token_settings=None,
output_files=None, package_references=None, **kwargs) -> None:
super(RepeatTask, self).__init__(**kwargs)
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.output_files = output_files
self.package_references = package_references
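
For instance, mirroring the ffmpeg sample later in this change, a repeat task template might look like:

from azext.batch import models

repeat = models.RepeatTask(
    command_line='ffmpeg -y -i sample{0}.mp3 -acodec libmp3lame out{0}.mp3',
    package_references=[models.AptPackageReference(id='ffmpeg')])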

View file

@ -76,13 +76,12 @@ class StartTask(Model):
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, container_settings=None, resource_files=None, environment_settings=None,
user_identity=None, max_task_retry_count=None, wait_for_success=None):
super(StartTask, self).__init__()
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
def __init__(self, **kwargs):
super(StartTask, self).__init__(**kwargs)
self.command_line = kwargs.get('command_line')
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.user_identity = kwargs.get('user_identity', None)
self.max_task_retry_count = kwargs.get('max_task_retry_count', None)
self.wait_for_success = kwargs.get('wait_for_success', None)

View file

@ -0,0 +1,88 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
start task runs. When this is specified, all directories recursively below
the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the
node) are mapped into the container, all task environment variables are
mapped into the container, and the task command line is executed in the
container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and failure info details. If false, the
Batch service will not wait for the start task to complete. In this case,
other tasks can start executing on the compute node while the start task
is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None,
user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None:
super(StartTask, self).__init__(**kwargs)
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
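
A hedged example of a start task that must succeed before the node is scheduled any work (the setup command is illustrative):

from azext.batch import models

start = models.StartTask(
    command_line='/bin/sh -c "apt-get -y update"',  # illustrative setup command
    wait_for_success=True,
    max_task_retry_count=3)  # one initial try plus up to 3 retries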

View file

@ -25,7 +25,7 @@ class TaskCollectionTaskFactory(TaskFactoryBase):
'tasks': {'key': 'tasks', 'type': '[ExtendedTaskParameter]'},
}
def __init__(self, tasks):
super(TaskCollectionTaskFactory, self).__init__()
self.tasks = tasks
def __init__(self, **kwargs):
super(TaskCollectionTaskFactory, self).__init__(**kwargs)
self.tasks = kwargs.get('tasks')
self.type = 'taskCollection'

View file

@ -0,0 +1,31 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .task_factory_base import TaskFactoryBase
class TaskCollectionTaskFactory(TaskFactoryBase):
"""A Task Factory for adding a predefined collection of tasks automatically
to a job on submission.
:param tasks: A list of task parameters, each of which will be added straight to the job.
:type tasks: A list of :class:`ExtendedTaskParameter
<azext.batch.models.ExtendedTaskParameter>`
"""
_validation = {
'type': {'required': True},
'tasks': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'tasks': {'key': 'tasks', 'type': '[ExtendedTaskParameter]'},
}
def __init__(self, *, tasks, **kwargs) -> None:
super(TaskCollectionTaskFactory, self).__init__(**kwargs)
self.tasks = tasks
self.type = 'taskCollection'
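
For example, a fixed collection of two tasks (IDs and command lines are illustrative):

from azext.batch import models

factory = models.TaskCollectionTaskFactory(tasks=[
    models.ExtendedTaskParameter(id='task1', command_line='/bin/sh -c "echo one"'),
    models.ExtendedTaskParameter(id='task2', command_line='/bin/sh -c "echo two"')])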

View file

@ -30,6 +30,7 @@ class TaskFactoryBase(Model):
'taskCollection': 'TaskCollectionTaskFactory'}
}
def __init__(self, merge_task=None):
self.merge_task = merge_task
def __init__(self, **kwargs):
super(TaskFactoryBase, self).__init__(**kwargs)
self.merge_task = kwargs.get('merge_task', None)
self.type = None

View file

@ -0,0 +1,36 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskFactoryBase(Model):
"""A Task Factory for automatically adding a collection of tasks to a job on
submission.
:param merge_task: An optional additional task to be run after all the other
generated tasks have completed successfully.
:type merge_task: :class:`MergeTask <azext.batch.models.MergeTask>`
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}
}
_subtype_map = {
'type': {'parametricSweep': 'ParametricSweepTaskFactory',
'taskPerFile': 'FileCollectionTaskFactory',
'taskCollection': 'TaskCollectionTaskFactory'}
}
def __init__(self, *, merge_task=None, **kwargs) -> None:
super(TaskFactoryBase, self).__init__(**kwargs)
self.merge_task = merge_task
self.type = None

View file

@ -31,7 +31,8 @@ class YumPackageReference(PackageReferenceBase):
'disable_excludes': {'key': 'disableExcludes', 'type': 'bool'}
}
def __init__(self, id, version=None, disable_excludes=None):
super(YumPackageReference, self).__init__(id=id, version=version)
self.disable_excludes = disable_excludes
self.type = 'yumPackage'
def __init__(self, **kwargs):
super(YumPackageReference, self).__init__(**kwargs)
self.disable_excludes = kwargs.get('disable_excludes', None)
self.type = 'yumPackage'

View file

@ -0,0 +1,37 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=redefined-builtin
from .package_reference_base import PackageReferenceBase
class YumPackageReference(PackageReferenceBase):
"""A reference to a package to be installed using the YUM package
manager on a Linux node.
:param str id: The name of the package.
:param str version: The version of the package to be installed. If omitted,
the latest version (according to the package repository) will be installed.
:param bool disable_excludes: Whether to allow packages that might otherwise
be excluded by VM configuration (e.g. kernel packages). Default is False.
"""
_validation = {
'type': {'required': True},
'id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'disable_excludes': {'key': 'disableExcludes', 'type': 'bool'}
}
def __init__(self, *, id: str, version: str=None, disable_excludes: bool=None, **kwargs) -> None:
super(YumPackageReference, self).__init__(id=id, version=version, **kwargs)
self.disable_excludes = disable_excludes
self.type = 'yumPackage'
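
For instance (the package name is assumed):

from azext.batch import models

kernel_pkg = models.YumPackageReference(
    id='kernel-devel',      # hypothetical package
    disable_excludes=True)  # allow packages the VM config would otherwise exclude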

View file

@ -3,4 +3,4 @@
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
VERSION = "3.1.2"
VERSION = "4.0.0"

View file

@ -3,6 +3,11 @@
Release History
===============
2.5.0 (2018-08-29)
------------------
* Use the new azure-batch-extensions SDK.
2.4.1 (2018-07-20)
------------------

View file

@ -13,9 +13,9 @@ from azext.batch import __version__ as batch_ext_version
logger = get_logger(__name__)
SUPPORTED_BATCH_VERSION = "4.2"
SUPPORTED_BMGMT_VERSION = "4.2"
SUPPORTED_BATCH_EXT_VERSION = "3.2"
SUPPORTED_BATCH_VERSION = "5.1"
SUPPORTED_BMGMT_VERSION = "5.1"
SUPPORTED_BATCH_EXT_VERSION = "4.1"
def confirm_version(current, supported, package):

View file

@ -69,7 +69,7 @@ def create_pool(client, template=None, parameters=None, json_file=None, id=None,
pool.enable_inter_node_communication = enable_inter_node_communication
if os_family:
pool.cloud_service_configuration = CloudServiceConfiguration(os_family)
pool.cloud_service_configuration = CloudServiceConfiguration(os_family=os_family)
else:
if image:
version = 'latest'
@ -92,7 +92,7 @@ def create_pool(client, template=None, parameters=None, json_file=None, id=None,
node_agent_sku_id=node_agent_sku_id)
if start_task_command_line:
pool.start_task = StartTask(start_task_command_line)
pool.start_task = StartTask(command_line=start_task_command_line)
pool.start_task.wait_for_success = start_task_wait_for_success
pool.start_task.resource_files = start_task_resource_files
if resize_timeout:
@ -161,8 +161,8 @@ def create_job(client, template=None, parameters=None, json_file=None, id=None,
job.metadata = metadata
if job_manager_task_command_line and job_manager_task_id:
job_manager_task = JobManagerTask(job_manager_task_id,
job_manager_task_command_line,
job_manager_task = JobManagerTask(id=job_manager_task_id,
command_line=job_manager_task_command_line,
resource_files=job_manager_task_resource_files,
environment_settings=job_manager_task_environment_settings) # pylint: disable=line-too-long
job.job_manager_task = job_manager_task

View file

@ -3,4 +3,4 @@
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
VERSION = "2.4.1"
VERSION = "2.5.0"

View file

@ -27,7 +27,7 @@ CLASSIFIERS = [
]
DEPENDENCIES = [
'azure-batch-extensions>=3.0.0,<3.2'
'azure-batch-extensions>=3.4.0,<3.5'
]
# Version extraction inspired from 'requests'

View file

@ -1,5 +1,5 @@
[MASTER]
ignore-patterns=test_*
ignore-patterns=test_*,.*?_py3.py
reports=no
[MESSAGES CONTROL]

View file

@ -17,5 +17,5 @@ setuptools==30.4.0
six==1.10.0
tabulate==0.7.5
vcrpy==1.10.3
azure-cli==2.0.37
azure-cli>=2.0.46,<2.1
azure-storage-blob==1.1.0

View file

@ -64,7 +64,7 @@ if __name__ == '__main__':
# Create a pool model with an application template reference
pool_ref = models.PoolInformation(pool_id=pool_id)
job_id = 'blender-app-template-test'
blender_job = models.ExtendedJobParameter(job_id, pool_ref)
blender_job = models.ExtendedJobParameter(id=job_id, pool_info=pool_ref)
blender_job.display_name = "Blender Render using Application Templates"
blender_job.on_all_tasks_complete = models.OnAllTasksComplete.terminate_job
blender_job.application_template_info = models.ApplicationTemplateInfo(

View file

@ -79,16 +79,16 @@ if __name__ == '__main__':
# Create parametric sweep job using models
job_id = "ffmpeg-parametric-sweep-test"
task_factory = models.ParametricSweepTaskFactory(
parameter_sets=[models.ParameterSet(1, 5)],
parameter_sets=[models.ParameterSet(start=1, end=5)],
repeat_task=models.RepeatTask(
command_line="ffmpeg -y -i sample{0}.mp3 -acodec libmp3lame output.mp3",
resource_files=[models.ExtendedResourceFile(source=models.FileSource(file_group=filegroup))],
output_files=[models.OutputFile(
"output.mp3",
file_pattern="output.mp3",
destination=models.ExtendedOutputFileDestination(
auto_storage=models.OutputFileAutoStorageDestination(job_id, path="audio{0}.mp3")),
upload_options=models.OutputFileUploadOptions(models.OutputFileUploadCondition.task_success))],
package_references=[models.AptPackageReference("ffmpeg")]))
package_references=[models.AptPackageReference(id="ffmpeg")]))
job = models.ExtendedJobParameter(
id=job_id,
pool_info=models.PoolInformation(pool_id=pool_param.properties.id),

View file

@ -28,7 +28,7 @@ CLASSIFIERS = [
DEPENDENCIES = [
'msrestazure>=0.4.14,<1',
'azure-batch>=4.0,<5',
'azure-batch>=5.0,<6',
'azure-mgmt-batch>=4.0,<5',
'azure-storage-blob>=1.1.0,<2',
'azure-mgmt-storage>=1.0,<2'

File diff not shown because it is too large. Load diff

View file

@ -6,28 +6,28 @@ interactions:
Accept-Encoding: ['gzip, deflate']
Connection: [keep-alive]
Content-Type: [application/json; charset=utf-8]
User-Agent: [python/3.6.1 (Windows-10-10.0.15063-SP0) requests/2.9.1 msrest/0.4.14
msrest_azure/0.4.14 batchmanagementclient/4.1.0 Azure-SDK-For-Python]
User-Agent: [python/3.6.5 (Windows-10-10.0.17134-SP0) requests/2.19.1 msrest/0.5.4
msrest_azure/0.5.0 batchmanagementclient/4.1.0 Azure-SDK-For-Python]
accept-language: [en-US]
x-ms-client-request-id: [a4c61314-a55b-11e7-9cd1-ecb1d755839a]
x-ms-client-request-id: [d90e14e8-ad44-11e8-9507-44032c851683]
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1?api-version=2017-05-01
response:
body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1","name":"test1","type":"Microsoft.Batch/batchAccounts","location":"westus","properties":{"accountEndpoint":"test1.westus.batch.azure.com","provisioningState":"Succeeded","dedicatedCoreQuota":20,"lowPriorityCoreQuota":50,"poolQuota":20,"activeJobAndJobScheduleQuota":20,"autoStorage":{"storageAccountId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/batchtest1","lastKeySync":"2017-07-22T23:00:06.9397085Z"},"poolAllocationMode":"batchservice"},"tags":{"Name":"tagName","Value":"tagValue"}}'}
body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1","name":"brkltest","type":"Microsoft.Batch/batchAccounts","location":"eastus2","properties":{"accountEndpoint":"brkltest.eastus2.batch.azure.com","provisioningState":"Succeeded","dedicatedCoreQuota":20,"lowPriorityCoreQuota":100,"poolQuota":100,"activeJobAndJobScheduleQuota":300,"autoStorage":{"storageAccountId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/brkltest","lastKeySync":"2018-08-27T21:09:28.1417679Z"},"poolAllocationMode":"batchservice"}}'}
headers:
Cache-Control: [no-cache]
Content-Type: [application/json; charset=utf-8]
Date: ['Fri, 29 Sep 2017 21:17:49 GMT']
ETag: ['"0x8D4D1556561825D"']
Date: ['Fri, 31 Aug 2018 17:39:44 GMT']
ETag: ['"0x8D60C615FFF15CF"']
Expires: ['-1']
Last-Modified: ['Sat, 22 Jul 2017 23:00:06 GMT']
Last-Modified: ['Mon, 27 Aug 2018 21:09:28 GMT']
Pragma: [no-cache]
Server: [Microsoft-HTTPAPI/2.0]
Strict-Transport-Security: [max-age=31536000; includeSubDomains]
Transfer-Encoding: [chunked]
Vary: [Accept-Encoding]
X-Content-Type-Options: [nosniff]
content-length: ['761']
content-length: ['660']
status: {code: 200, message: OK}
- request:
body: null
@ -37,25 +37,27 @@ interactions:
Connection: [keep-alive]
Content-Length: ['0']
Content-Type: [application/json; charset=utf-8]
User-Agent: [python/3.6.1 (Windows-10-10.0.15063-SP0) requests/2.9.1 msrest/0.4.14
msrest_azure/0.4.14 storagemanagementclient/1.2.0 Azure-SDK-For-Python]
User-Agent: [python/3.6.5 (Windows-10-10.0.17134-SP0) requests/2.19.1 msrest/0.5.4
msrest_azure/0.5.0 azure-mgmt-storage/1.5.0 Azure-SDK-For-Python]
accept-language: [en-US]
x-ms-client-request-id: [a59b4f9c-a55b-11e7-a5d0-ecb1d755839a]
x-ms-client-request-id: [d92b6286-ad44-11e8-8081-44032c851683]
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/batchtest1/listKeys?api-version=2017-10-01
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/brkltest/listKeys?api-version=2017-10-01
response:
body: {string: '{"keys":[{"keyName":"key1","permissions":"Full","value":"abc=="},{"keyName":"key2","permissions":"Full","value":"xyz=="}]}'}
body: {string: '{"keys":[{"keyName":"key1","value":"abc==","permissions":"FULL"},{"keyName":"key2","value":"abc==","permissions":"FULL"}]}'}
headers:
Cache-Control: [no-cache]
Content-Type: [application/json]
Date: ['Fri, 29 Sep 2017 21:17:50 GMT']
Date: ['Fri, 31 Aug 2018 17:39:44 GMT']
Expires: ['-1']
Pragma: [no-cache]
Server: [Microsoft-Azure-Storage-Resource-Provider/1.0, Microsoft-HTTPAPI/2.0]
Server: ['Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0
Microsoft-HTTPAPI/2.0']
Strict-Transport-Security: [max-age=31536000; includeSubDomains]
Transfer-Encoding: [chunked]
Vary: [Accept-Encoding]
content-length: ['289']
X-Content-Type-Options: [nosniff]
content-length: ['288']
x-ms-ratelimit-remaining-subscription-writes: ['1199']
status: {code: 200, message: OK}
- request:
@ -63,43 +65,43 @@ interactions:
headers:
Connection: [keep-alive]
Content-Length: ['0']
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
x-ms-client-request-id: [a61a7ff6-a55b-11e7-b360-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:50 GMT']
x-ms-version: ['2016-05-31']
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-client-request-id: [d97c063e-ad44-11e8-a5f1-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:45 GMT']
x-ms-version: ['2017-07-29']
method: PUT
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container
response:
body: {string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ContainerAlreadyExists</Code><Message>The\
\ specified container already exists.\nRequestId:96e53d8d-001e-0012-7168-3992dc000000\n\
Time:2017-09-29T21:17:51.1443037Z</Message></Error>"}
body: {string: ''}
headers:
Content-Length: ['230']
Content-Type: [application/xml]
Date: ['Fri, 29 Sep 2017 21:17:50 GMT']
Date: ['Fri, 31 Aug 2018 17:39:45 GMT']
ETag: ['"0x8D60F68BDEA04E2"']
Last-Modified: ['Fri, 31 Aug 2018 17:39:45 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
x-ms-version: ['2016-05-31']
status: {code: 409, message: The specified container already exists.}
Transfer-Encoding: [chunked]
x-ms-version: ['2017-07-29']
status: {code: 201, message: Created}
- request:
body: null
headers:
Connection: [keep-alive]
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
x-ms-client-request-id: [a694cbc0-a55b-11e7-8fc8-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:51 GMT']
x-ms-version: ['2016-05-31']
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-client-request-id: [d9bd586c-ad44-11e8-b21c-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:45 GMT']
x-ms-version: ['2017-07-29']
method: GET
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt?comp=metadata
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt?comp=metadata
response:
body: {string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>BlobNotFound</Code><Message>The\
\ specified blob does not exist.\nRequestId:96e53da4-001e-0012-0368-3992dc000000\n\
Time:2017-09-29T21:17:51.3434456Z</Message></Error>"}
\ specified blob does not exist.\nRequestId:3af498d4-e01e-00a0-4b51-417970000000\n\
Time:2018-08-31T17:39:45.7700738Z</Message></Error>"}
headers:
Content-Length: ['215']
Content-Type: [application/xml]
Date: ['Fri, 29 Sep 2017 21:17:51 GMT']
Date: ['Fri, 31 Aug 2018 17:39:45 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
x-ms-version: ['2016-05-31']
x-ms-error-code: [BlobNotFound]
x-ms-version: ['2017-07-29']
status: {code: 404, message: The specified blob does not exist.}
- request:
body: '1'
@ -107,25 +109,25 @@ interactions:
Connection: [keep-alive]
Content-Length: ['1']
Content-MD5: [xMpCOKC5I4INzFCab3WEmw==]
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-blob-type: [BlockBlob]
x-ms-client-request-id: [a6b33c64-a55b-11e7-bbc4-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:51 GMT']
x-ms-meta-lastmodified: ['1490129895.872718']
x-ms-version: ['2016-05-31']
x-ms-client-request-id: [d9cb3c92-ad44-11e8-a3e3-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:45 GMT']
x-ms-meta-lastmodified: ['1535564284.7045333']
x-ms-version: ['2017-07-29']
method: PUT
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt
response:
body: {string: ''}
headers:
Content-MD5: [xMpCOKC5I4INzFCab3WEmw==]
Date: ['Fri, 29 Sep 2017 21:17:51 GMT']
ETag: ['"0x8D5077F8B4C6D2B"']
Last-Modified: ['Fri, 29 Sep 2017 21:17:52 GMT']
Date: ['Fri, 31 Aug 2018 17:39:45 GMT']
ETag: ['"0x8D60F68BE06839B"']
Last-Modified: ['Fri, 31 Aug 2018 17:39:45 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
Transfer-Encoding: [chunked]
x-ms-request-server-encrypted: ['false']
x-ms-version: ['2016-05-31']
x-ms-request-server-encrypted: ['true']
x-ms-version: ['2017-07-29']
status: {code: 201, message: Created}
- request:
body: null
@@ -134,28 +136,28 @@ interactions:
Accept-Encoding: ['gzip, deflate']
Connection: [keep-alive]
Content-Type: [application/json; charset=utf-8]
User-Agent: [python/3.6.1 (Windows-10-10.0.15063-SP0) requests/2.9.1 msrest/0.4.14
msrest_azure/0.4.14 batchmanagementclient/4.1.0 Azure-SDK-For-Python]
User-Agent: [python/3.6.5 (Windows-10-10.0.17134-SP0) requests/2.19.1 msrest/0.5.4
msrest_azure/0.5.0 batchmanagementclient/4.1.0 Azure-SDK-For-Python]
accept-language: [en-US]
x-ms-client-request-id: [a6d68fba-a55b-11e7-95af-ecb1d755839a]
x-ms-client-request-id: [d9ee4a7a-ad44-11e8-ae6d-44032c851683]
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1?api-version=2017-05-01
response:
body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1","name":"test1","type":"Microsoft.Batch/batchAccounts","location":"westus","properties":{"accountEndpoint":"test1.westus.batch.azure.com","provisioningState":"Succeeded","dedicatedCoreQuota":20,"lowPriorityCoreQuota":50,"poolQuota":20,"activeJobAndJobScheduleQuota":20,"autoStorage":{"storageAccountId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/batchtest1","lastKeySync":"2017-07-22T23:00:06.9397085Z"},"poolAllocationMode":"batchservice"},"tags":{"Name":"tagName","Value":"tagValue"}}'}
body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Batch/batchAccounts/test1","name":"brkltest","type":"Microsoft.Batch/batchAccounts","location":"eastus2","properties":{"accountEndpoint":"brkltest.eastus2.batch.azure.com","provisioningState":"Succeeded","dedicatedCoreQuota":20,"lowPriorityCoreQuota":100,"poolQuota":100,"activeJobAndJobScheduleQuota":300,"autoStorage":{"storageAccountId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/brkltest","lastKeySync":"2018-08-27T21:09:28.1417679Z"},"poolAllocationMode":"batchservice"}}'}
headers:
Cache-Control: [no-cache]
Content-Type: [application/json; charset=utf-8]
Date: ['Fri, 29 Sep 2017 21:17:52 GMT']
ETag: ['"0x8D4D1556561825D"']
Date: ['Fri, 31 Aug 2018 17:39:46 GMT']
ETag: ['"0x8D60C615FFF15CF"']
Expires: ['-1']
Last-Modified: ['Sat, 22 Jul 2017 23:00:06 GMT']
Last-Modified: ['Mon, 27 Aug 2018 21:09:28 GMT']
Pragma: [no-cache]
Server: [Microsoft-HTTPAPI/2.0]
Strict-Transport-Security: [max-age=31536000; includeSubDomains]
Transfer-Encoding: [chunked]
Vary: [Accept-Encoding]
X-Content-Type-Options: [nosniff]
content-length: ['761']
content-length: ['660']
status: {code: 200, message: OK}
- request:
body: null
@@ -165,25 +167,27 @@ interactions:
Connection: [keep-alive]
Content-Length: ['0']
Content-Type: [application/json; charset=utf-8]
User-Agent: [python/3.6.1 (Windows-10-10.0.15063-SP0) requests/2.9.1 msrest/0.4.14
msrest_azure/0.4.14 storagemanagementclient/1.2.0 Azure-SDK-For-Python]
User-Agent: [python/3.6.5 (Windows-10-10.0.17134-SP0) requests/2.19.1 msrest/0.5.4
msrest_azure/0.5.0 azure-mgmt-storage/1.5.0 Azure-SDK-For-Python]
accept-language: [en-US]
x-ms-client-request-id: [a782c0d2-a55b-11e7-9903-ecb1d755839a]
x-ms-client-request-id: [da34e512-ad44-11e8-9c6e-44032c851683]
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/batchtest1/listKeys?api-version=2017-10-01
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_rg/providers/Microsoft.Storage/storageAccounts/brkltest/listKeys?api-version=2017-10-01
response:
body: {string: '{"keys":[{"keyName":"key1","permissions":"Full","value":"abc=="},{"keyName":"key2","permissions":"Full","value":"xyz=="}]}'}
body: {string: '{"keys":[{"keyName":"key1","value":"abc==","permissions":"FULL"},{"keyName":"key2","value":"abc==","permissions":"FULL"}]}'}
headers:
Cache-Control: [no-cache]
Content-Type: [application/json]
Date: ['Fri, 29 Sep 2017 21:17:52 GMT']
Date: ['Fri, 31 Aug 2018 17:39:45 GMT']
Expires: ['-1']
Pragma: [no-cache]
Server: [Microsoft-Azure-Storage-Resource-Provider/1.0, Microsoft-HTTPAPI/2.0]
Server: ['Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0
Microsoft-HTTPAPI/2.0']
Strict-Transport-Security: [max-age=31536000; includeSubDomains]
Transfer-Encoding: [chunked]
Vary: [Accept-Encoding]
content-length: ['289']
X-Content-Type-Options: [nosniff]
content-length: ['288']
x-ms-ratelimit-remaining-subscription-writes: ['1199']
status: {code: 200, message: OK}
- request:
@@ -191,43 +195,45 @@ interactions:
headers:
Connection: [keep-alive]
Content-Length: ['0']
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
x-ms-client-request-id: [a7b10f40-a55b-11e7-b17e-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:53 GMT']
x-ms-version: ['2016-05-31']
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-client-request-id: [da66b27e-ad44-11e8-89da-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:46 GMT']
x-ms-version: ['2017-07-29']
method: PUT
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container
response:
body: {string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ContainerAlreadyExists</Code><Message>The\
\ specified container already exists.\nRequestId:f3a32ec0-001e-005f-7768-39543e000000\n\
Time:2017-09-29T21:17:53.6939273Z</Message></Error>"}
\ specified container already exists.\nRequestId:7bf2f4c4-b01e-0019-0651-419a7e000000\n\
Time:2018-08-31T17:39:47.1628735Z</Message></Error>"}
headers:
Content-Length: ['230']
Content-Type: [application/xml]
Date: ['Fri, 29 Sep 2017 21:17:53 GMT']
Date: ['Fri, 31 Aug 2018 17:39:46 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
x-ms-version: ['2016-05-31']
x-ms-error-code: [ContainerAlreadyExists]
x-ms-version: ['2017-07-29']
status: {code: 409, message: The specified container already exists.}
- request:
body: null
headers:
Connection: [keep-alive]
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
x-ms-client-request-id: [a82b9b64-a55b-11e7-9188-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:54 GMT']
x-ms-version: ['2016-05-31']
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-client-request-id: [daa01f08-ad44-11e8-bc86-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:47 GMT']
x-ms-version: ['2017-07-29']
method: GET
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt?comp=metadata
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt?comp=metadata
response:
body: {string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>BlobNotFound</Code><Message>The\
\ specified blob does not exist.\nRequestId:f3a32ede-001e-005f-1068-39543e000000\n\
Time:2017-09-29T21:17:53.8960714Z</Message></Error>"}
\ specified blob does not exist.\nRequestId:7bf2f513-b01e-0019-4d51-419a7e000000\n\
Time:2018-08-31T17:39:47.2619675Z</Message></Error>"}
headers:
Content-Length: ['215']
Content-Type: [application/xml]
Date: ['Fri, 29 Sep 2017 21:17:53 GMT']
Date: ['Fri, 31 Aug 2018 17:39:47 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
x-ms-version: ['2016-05-31']
x-ms-error-code: [BlobNotFound]
x-ms-version: ['2017-07-29']
status: {code: 404, message: The specified blob does not exist.}
- request:
body: '1'
@@ -235,24 +241,24 @@ interactions:
Connection: [keep-alive]
Content-Length: ['1']
Content-MD5: [xMpCOKC5I4INzFCab3WEmw==]
User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.6.1; Windows 10)]
User-Agent: [Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)]
x-ms-blob-type: [BlockBlob]
x-ms-client-request-id: [a84a6dd8-a55b-11e7-8328-ecb1d755839a]
x-ms-date: ['Fri, 29 Sep 2017 21:17:54 GMT']
x-ms-meta-lastmodified: ['1490129895.872718']
x-ms-version: ['2016-05-31']
x-ms-client-request-id: [daaf798c-ad44-11e8-a293-44032c851683]
x-ms-date: ['Fri, 31 Aug 2018 17:39:47 GMT']
x-ms-meta-lastmodified: ['1535564284.7045333']
x-ms-version: ['2017-07-29']
method: PUT
uri: https://batchtest1.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt
uri: https://brkltest.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt
response:
body: {string: ''}
headers:
Content-MD5: [xMpCOKC5I4INzFCab3WEmw==]
Date: ['Fri, 29 Sep 2017 21:17:53 GMT']
ETag: ['"0x8D5077F8CE3FC7D"']
Last-Modified: ['Fri, 29 Sep 2017 21:17:54 GMT']
Date: ['Fri, 31 Aug 2018 17:39:47 GMT']
ETag: ['"0x8D60F68BEEC60B2"']
Last-Modified: ['Fri, 31 Aug 2018 17:39:47 GMT']
Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
Transfer-Encoding: [chunked]
x-ms-request-server-encrypted: ['false']
x-ms-version: ['2016-05-31']
x-ms-request-server-encrypted: ['true']
x-ms-version: ['2017-07-29']
status: {code: 201, message: Created}
version: 1

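The recordings above follow the VCR cassette format: each HTTP interaction is serialized to YAML and replayed in place of a live call on subsequent test runs, which is why the account names and timestamps in this diff change together. A minimal sketch of that record/replay flow, assuming the vcrpy library (the cassette name and request here are illustrative):

import vcr
import requests

# Recorder configured to store cassettes shaped like the YAML above.
my_vcr = vcr.VCR(
    serializer='yaml',
    cassette_library_dir='recordings',
    record_mode='once',  # record on the first run, replay afterwards
)

with my_vcr.use_cassette('test_batch_upload_live.yaml'):
    # On replay, the 409/201 responses recorded above are returned
    # without touching the live storage account.
    requests.put('https://brkltest.blob.core.windows.net/'
                 'fgrp-cli-batch-extensions-live-tests?restype=container')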
View file

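The hunks below all apply one mechanical migration: the regenerated models read their values from keyword arguments only, so every positional construction must be rewritten with explicit keywords. A simplified stand-in sketch of such a signature (assumed shape; the real generated models add validation and base-class plumbing):

class ParameterSet(object):
    # Values come from keyword arguments only, so the old positional
    # call ParameterSet(1, 2) now raises a TypeError.
    def __init__(self, **kwargs):
        self.start = kwargs.get('start')
        self.end = kwargs.get('end')
        self.step = kwargs.get('step', 1)

ParameterSet(start=1, end=2)   # new style: OK
# ParameterSet(1, 2)           # TypeError under the keyword-only signature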
@@ -473,29 +473,29 @@ class TestBatchExtensions(unittest.TestCase):
def test_batch_extensions_parse_parametricsweep_factory(self):
template = models.ParametricSweepTaskFactory(
parameter_sets=[
models.ParameterSet(1, 2),
models.ParameterSet(3, 5)
models.ParameterSet(start=1, end=2),
models.ParameterSet(start=3, end=5)
],
repeat_task= models.RepeatTask("cmd {0}.mp3 {1}.mp3"))
repeat_task=models.RepeatTask(command_line="cmd {0}.mp3 {1}.mp3"))
result = utils._expand_parametric_sweep(template) # pylint:disable=protected-access
expected = [
models.ExtendedTaskParameter('0', 'cmd 1.mp3 3.mp3'),
models.ExtendedTaskParameter('1', 'cmd 1.mp3 4.mp3'),
models.ExtendedTaskParameter('2', 'cmd 1.mp3 5.mp3'),
models.ExtendedTaskParameter('3', 'cmd 2.mp3 3.mp3'),
models.ExtendedTaskParameter('4', 'cmd 2.mp3 4.mp3'),
models.ExtendedTaskParameter('5', 'cmd 2.mp3 5.mp3')
models.ExtendedTaskParameter(id='0', command_line='cmd 1.mp3 3.mp3'),
models.ExtendedTaskParameter(id='1', command_line='cmd 1.mp3 4.mp3'),
models.ExtendedTaskParameter(id='2', command_line='cmd 1.mp3 5.mp3'),
models.ExtendedTaskParameter(id='3', command_line='cmd 2.mp3 3.mp3'),
models.ExtendedTaskParameter(id='4', command_line='cmd 2.mp3 4.mp3'),
models.ExtendedTaskParameter(id='5', command_line='cmd 2.mp3 5.mp3')
]
for index, task in enumerate(result):
self.assertEqual(expected[index].id, task.id)
self.assertEqual(expected[index].command_line, task.command_line)
template = models.ParametricSweepTaskFactory(
parameter_sets=[models.ParameterSet(1, 3)],
repeat_task= models.RepeatTask("cmd {0}.mp3",
parameter_sets=[models.ParameterSet(start=1, end=3)],
repeat_task=models.RepeatTask(command_line="cmd {0}.mp3",
resource_files=[
models.ResourceFile("http://account.blob/run.exe", "run.exe"),
models.ResourceFile("http://account.blob/{0}.dat", "{0}.mp3")],
models.ResourceFile(blob_source="http://account.blob/run.exe", file_path="run.exe"),
models.ResourceFile(blob_source="http://account.blob/{0}.dat", file_path="{0}.mp3")],
output_files=[models.OutputFile(
file_pattern="{0}.txt",
destination=models.ExtendedOutputFileDestination(
@@ -509,10 +509,10 @@ class TestBatchExtensions(unittest.TestCase):
)
)]))
expected = [
models.ExtendedTaskParameter('0', 'cmd 1.mp3',
models.ExtendedTaskParameter(id='0', command_line='cmd 1.mp3',
resource_files=[
models.ResourceFile("http://account.blob/run.exe", "run.exe"),
models.ResourceFile("http://account.blob/1.dat", "1.mp3")],
models.ResourceFile(blob_source="http://account.blob/run.exe", file_path="run.exe"),
models.ResourceFile(blob_source="http://account.blob/1.dat", file_path="1.mp3")],
output_files=[models.OutputFile(
file_pattern="1.txt",
destination=models.ExtendedOutputFileDestination(
@@ -524,10 +524,10 @@ class TestBatchExtensions(unittest.TestCase):
upload_options=models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
))]),
models.ExtendedTaskParameter('1', 'cmd 2.mp3',
models.ExtendedTaskParameter(id='1', command_line='cmd 2.mp3',
resource_files=[
models.ResourceFile("http://account.blob/run.exe", "run.exe"),
models.ResourceFile("http://account.blob/2.dat", "2.mp3")],
models.ResourceFile(blob_source="http://account.blob/run.exe", file_path="run.exe"),
models.ResourceFile(blob_source="http://account.blob/2.dat", file_path="2.mp3")],
output_files=[models.OutputFile(
file_pattern="2.txt",
destination=models.ExtendedOutputFileDestination(
@@ -539,10 +539,10 @@ class TestBatchExtensions(unittest.TestCase):
upload_options=models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
))]),
models.ExtendedTaskParameter('2', 'cmd 3.mp3',
models.ExtendedTaskParameter(id='2', command_line='cmd 3.mp3',
resource_files=[
models.ResourceFile("http://account.blob/run.exe", "run.exe"),
models.ResourceFile("http://account.blob/3.dat", "3.mp3")],
models.ResourceFile(blob_source="http://account.blob/run.exe", file_path="run.exe"),
models.ResourceFile(blob_source="http://account.blob/3.dat", file_path="3.mp3")],
output_files=[models.OutputFile(
file_pattern="3.txt",
destination=models.ExtendedOutputFileDestination(
@@ -564,16 +564,16 @@ class TestBatchExtensions(unittest.TestCase):
template = models.ParametricSweepTaskFactory(
parameter_sets=[
models.ParameterSet(1, 3)
models.ParameterSet(start=1, end=3)
],
repeat_task= models.RepeatTask("cmd {0}.mp3"),
merge_task=models.MergeTask("summary.exe"))
repeat_task=models.RepeatTask(command_line="cmd {0}.mp3"),
merge_task=models.MergeTask(command_line="summary.exe"))
expected = [
models.ExtendedTaskParameter('0', 'cmd 1.mp3'),
models.ExtendedTaskParameter('1', 'cmd 2.mp3'),
models.ExtendedTaskParameter('2', 'cmd 3.mp3'),
models.ExtendedTaskParameter('merge', 'summary.exe',
depends_on=models.TaskDependencies(task_id_ranges=models.TaskIdRange(0, 2)))
models.ExtendedTaskParameter(id='0', command_line='cmd 1.mp3'),
models.ExtendedTaskParameter(id='1', command_line='cmd 2.mp3'),
models.ExtendedTaskParameter(id='2', command_line='cmd 3.mp3'),
models.ExtendedTaskParameter(id='merge', command_line='summary.exe',
depends_on=models.TaskDependencies(task_id_ranges=models.TaskIdRange(start=0, end=2)))
]
result = utils._expand_parametric_sweep(template) # pylint: disable=protected-access
for index, task in enumerate(result):
@@ -585,12 +585,12 @@ class TestBatchExtensions(unittest.TestCase):
def test_batch_extensions_parse_invalid_parametricsweep(self):
with self.assertRaises(ValueError):
utils._expand_parametric_sweep(Mock(parameter_sets=None, repeat_task=models.RepeatTask('cmd {0}.mp3'))) # pylint: disable=protected-access
utils._expand_parametric_sweep(Mock(parameter_sets=None, repeat_task=models.RepeatTask(command_line='cmd {0}.mp3'))) # pylint: disable=protected-access
with self.assertRaises(ValueError):
utils._expand_parametric_sweep(Mock(parameter_sets=[models.ParameterSet(1, 3)], repeat_task=None)) # pylint: disable=protected-access
utils._expand_parametric_sweep(Mock(parameter_sets=[models.ParameterSet(start=1, end=3)], repeat_task=None)) # pylint: disable=protected-access
template = models.ParametricSweepTaskFactory(
parameter_sets=[
models.ParameterSet(1, 3)
models.ParameterSet(start=1, end=3)
],
repeat_task=models.RepeatTask(
command_line=None,
@@ -608,7 +608,7 @@ class TestBatchExtensions(unittest.TestCase):
utils._expand_parametric_sweep(template) # pylint: disable=protected-access
template = models.ParametricSweepTaskFactory(
parameter_sets=[
models.ParameterSet(1, 3)
models.ParameterSet(start=1, end=3)
],
repeat_task=models.RepeatTask(
command_line="cmd {0}.mp3",
@@ -737,8 +737,8 @@ class TestBatchExtensions(unittest.TestCase):
target_dedicated_nodes="10",
enable_auto_scale=False,
package_references=[
models.AptPackageReference("ffmpeg"),
models.AptPackageReference("apache2", "12.34")
models.AptPackageReference(id="ffmpeg"),
models.AptPackageReference(id="apache2", version="12.34")
]
)
commands = [utils.process_pool_package_references(pool)]
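These package references are flattened by utils.process_pool_package_references into a single install command for the pool. A rough, self-contained sketch of the APT mapping being exercised (the exact command text the library emits is an assumption here, not quoted from it):

from collections import namedtuple

# Stand-in for AptPackageReference with the two fields used above.
PackageRef = namedtuple('PackageRef', ['id', 'version'])

def apt_install_command(refs):
    # APT pins a version as 'name=version'; unversioned ids pass through.
    pkgs = ' '.join('{}={}'.format(r.id, r.version) if r.version else r.id
                    for r in refs)
    return 'apt-get update;apt-get install -y {}'.format(pkgs)

print(apt_install_command([PackageRef('ffmpeg', None),
                           PackageRef('apache2', '12.34')]))
# apt-get update;apt-get install -y ffmpeg apache2=12.34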
@@ -767,8 +767,10 @@ class TestBatchExtensions(unittest.TestCase):
target_dedicated_nodes="10",
enable_auto_scale=False,
package_references=[
models.ChocolateyPackageReference("ffmpeg"),
models.ChocolateyPackageReference("testpkg", "12.34", True)
models.ChocolateyPackageReference(id="ffmpeg"),
models.ChocolateyPackageReference(id="testpkg",
version="12.34",
allow_empty_checksums=True)
]
)
commands = [utils.process_pool_package_references(pool)]
@@ -815,8 +817,8 @@ class TestBatchExtensions(unittest.TestCase):
]
),
package_references=[
models.AptPackageReference("ffmpeg"),
models.AptPackageReference("apache2", "12.34")
models.AptPackageReference(id="ffmpeg"),
models.AptPackageReference(id="apache2", version="12.34")
]
)
commands = [utils.process_pool_package_references(pool)]
@@ -838,12 +840,12 @@ class TestBatchExtensions(unittest.TestCase):
job = Mock(
job_preparation_task=None,
task_factory=models.ParametricSweepTaskFactory(
parameter_sets=[models.ParameterSet(1, 2), models.ParameterSet(3, 5)],
parameter_sets=[models.ParameterSet(start=1, end=2), models.ParameterSet(start=3, end=5)],
repeat_task=models.RepeatTask(
command_line="cmd {0}.mp3 {1}.mp3",
package_references=[
models.AptPackageReference("ffmpeg"),
models.AptPackageReference("apache2", "12.34")
models.AptPackageReference(id="ffmpeg"),
models.AptPackageReference(id="apache2", version="12.34")
]
)
)
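The expansion behind these factories is a cartesian product over inclusive parameter ranges, substituted into the repeat task's command line. A short sketch of that behaviour (simplified; step handling and resource/output file substitution omitted):

import itertools

def expand(parameter_sets, command_template):
    # Each (start, end) pair becomes an inclusive range; the product of
    # all ranges fills the {0}, {1}, ... placeholders.
    ranges = [range(start, end + 1) for start, end in parameter_sets]
    return [command_template.format(*combo)
            for combo in itertools.product(*ranges)]

print(expand([(1, 2), (3, 5)], 'cmd {0}.mp3 {1}.mp3'))
# ['cmd 1.mp3 3.mp3', 'cmd 1.mp3 4.mp3', 'cmd 1.mp3 5.mp3',
#  'cmd 2.mp3 3.mp3', 'cmd 2.mp3 4.mp3', 'cmd 2.mp3 5.mp3']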
@@ -867,8 +869,8 @@ class TestBatchExtensions(unittest.TestCase):
job = Mock(
job_preparation_task=None,
task_factory=models.ParametricSweepTaskFactory(
parameter_sets=[models.ParameterSet(1, 2), models.ParameterSet(3, 5)],
repeat_task=models.RepeatTask("cmd {0}.mp3 {1}.mp3")
parameter_sets=[models.ParameterSet(start=1, end=2), models.ParameterSet(start=3, end=5)],
repeat_task=models.RepeatTask(command_line="cmd {0}.mp3 {1}.mp3")
)
)
collection = utils.expand_task_factory(job, None)
@@ -898,19 +900,19 @@ class TestBatchExtensions(unittest.TestCase):
target_dedicated_nodes="10",
enable_auto_scale=False,
package_references=[
models.AptPackageReference("ffmpeg"),
models.AptPackageReference("apache2", "12.34")
models.AptPackageReference(id="ffmpeg"),
models.AptPackageReference(id="apache2", version="12.34")
]
)
pool.package_references[0].type = "newPackage"
with self.assertRaises(ValueError):
utils.process_pool_package_references(pool)
pool.package_references[0] = models.ChocolateyPackageReference("ffmpeg")
pool.package_references[0] = models.ChocolateyPackageReference(id="ffmpeg")
with self.assertRaises(ValueError):
utils.process_pool_package_references(pool)
pool.package_references = [models.AptPackageReference("ffmpeg", "12.34")]
pool.package_references = [models.AptPackageReference(id="ffmpeg", version="12.34")]
pool.package_references[0].id = None
with self.assertRaises(ValueError):
utils.process_pool_package_references(pool)
@@ -918,22 +920,22 @@ class TestBatchExtensions(unittest.TestCase):
def test_batch_extensions_validate_job_requesting_app_template(self):
# Should do nothing for a job not using an application template
job = models.ExtendedJobParameter('jobid', None)
job = models.ExtendedJobParameter(id='jobid', pool_info=None)
# Should throw an error if job does not specify template location
with self.assertRaises(TypeError):
appTemplate = models.ApplicationTemplateInfo(None)
appTemplate = models.ApplicationTemplateInfo(file_path=None)
# Should throw an error if the template referenced by the job does not
# exist
with self.assertRaises(ValueError):
appTemplate = models.ApplicationTemplateInfo(self.static_apptemplate_path + '.notfound')
appTemplate = models.ApplicationTemplateInfo(file_path=(self.static_apptemplate_path + '.notfound'))
# Should throw an error if job uses property reserved for application
# template use
app_template = models.ApplicationTemplateInfo(self.static_apptemplate_path)
app_template = models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path)
with self.assertRaises(ValueError):
job = models.ExtendedJobParameter('jobid', None, application_template_info=app_template,
job = models.ExtendedJobParameter(id='jobid', pool_info=None, application_template_info=app_template,
uses_task_dependencies=True)
def test_batch_extensions_merge_metadata(self):
@@ -1061,32 +1063,32 @@ class TestBatchExtensions(unittest.TestCase):
# should do nothing when no application template is required
# should throw error if no filePath supplied for application template
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
job.application_template_info.file_path = None
with self.assertRaises(ValueError):
utils.expand_application_template(job, self._deserialize)
# should merge a template with no parameters
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
result = utils.expand_application_template(job, self._deserialize)
self.assertIsNotNone(job.job_manager_task,
"expect the template to have provided jobManagerTask.")
# should preserve properties on the job when expanding the template
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
result = utils.expand_application_template(job, self._deserialize)
self.assertEqual(job.id, 'importantjob')
self.assertEqual(job.priority, 500)
# should use parameters from the job to expand the template
job = models.ExtendedJobParameter("parameterJob", None,
job = models.ExtendedJobParameter(id="parameterJob", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(
self.apptemplate_with_params_path,
file_path=self.apptemplate_with_params_path,
parameters={
'blobName': "music.mp3",
'keyValue': "yale"
@@ -1103,9 +1105,9 @@ class TestBatchExtensions(unittest.TestCase):
# should throw an error if any parameter has an undefined type
untyped_parameter_path = os.path.join(self.data_dir,
'batch-applicationTemplate-untypedParameter.json')
job = models.ExtendedJobParameter("parameterJob", None,
job = models.ExtendedJobParameter(id="parameterJob", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(
untyped_parameter_path,
file_path=untyped_parameter_path,
parameters={
'blobName': "music.mp3",
'keyValue': "yale"
@@ -1116,31 +1118,31 @@ class TestBatchExtensions(unittest.TestCase):
'Expect parameter \'blobName\' to be mentioned')
# should not have an applicationTemplateInfo property on the expanded job
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
utils.expand_application_template(job, self._deserialize)
self.assertIsNone(job.application_template_info)
# should not copy templateMetadata to the expanded job
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
utils.expand_application_template(job, self._deserialize)
self.assertFalse(hasattr(job, 'template_metadata'))
# should not have a parameters property on the expanded job
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
utils.expand_application_template(job, self._deserialize)
self.assertFalse(hasattr(job, 'parameters'))
# should throw error if application template specifies \'id\' property
templateFilePath = os.path.join(self.data_dir,
'batch-applicationTemplate-prohibitedId.json')
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(templateFilePath))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=templateFilePath))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
self.assertIn('id', ve.exception.args[0], 'Expect property \'id\' to be mentioned')
@@ -1148,8 +1150,8 @@ class TestBatchExtensions(unittest.TestCase):
# should throw error if application template specifies \'poolInfo\' property
templateFilePath = os.path.join(self.data_dir,
'batch-applicationTemplate-prohibitedPoolInfo.json')
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(templateFilePath))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=templateFilePath))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
self.assertIn('poolInfo', ve.exception.args[0],
@@ -1158,8 +1160,8 @@ class TestBatchExtensions(unittest.TestCase):
# should throw error if application template specifies \'applicationTemplateInfo\' property
templateFilePath = os.path.join(self.data_dir,
'batch-applicationTemplate-prohibitedApplicationTemplateInfo.json')
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(templateFilePath))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=templateFilePath))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
self.assertIn('applicationTemplateInfo', ve.exception.args[0],
@@ -1168,8 +1170,8 @@ class TestBatchExtensions(unittest.TestCase):
# should throw error if application template specifies \'priority\' property
templateFilePath = os.path.join(self.data_dir,
'batch-applicationTemplate-prohibitedPriority.json')
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(templateFilePath))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=templateFilePath))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
self.assertIn('priority', ve.exception.args[0],
@@ -1178,19 +1180,19 @@ class TestBatchExtensions(unittest.TestCase):
# should throw error if application template specifies unrecognized property
templateFilePath = os.path.join(self.data_dir,
'batch-applicationTemplate-unsupportedProperty.json')
job = models.ExtendedJobParameter("jobid", None,
application_template_info=models.ApplicationTemplateInfo(templateFilePath))
job = models.ExtendedJobParameter(id="jobid", pool_info=None,
application_template_info=models.ApplicationTemplateInfo(file_path=templateFilePath))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
self.assertIn('fluxCapacitorModel', ve.exception.args[0],
'Expect property \'fluxCapacitorModel\' to be mentioned')
# should include metadata from original job on generated job
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
metadata=[models.MetadataItem('author', 'batman')],
metadata=[models.MetadataItem(name='author', value='batman')],
application_template_info=models.ApplicationTemplateInfo(
self.apptemplate_with_params_path,
file_path=self.apptemplate_with_params_path,
parameters={
'blobName': 'henry',
'keyValue': 'yale'
@@ -1201,11 +1203,11 @@ class TestBatchExtensions(unittest.TestCase):
self.assertTrue([m for m in job.metadata if m.name=='author' and m.value=='batman'])
# should include metadata from template on generated job
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
metadata=[models.MetadataItem('author', 'batman')],
metadata=[models.MetadataItem(name='author', value='batman')],
application_template_info=models.ApplicationTemplateInfo(
self.apptemplate_with_params_path,
file_path=self.apptemplate_with_params_path,
parameters={
'blobName': 'henry',
'keyValue': 'yale'
@@ -1216,20 +1218,20 @@ class TestBatchExtensions(unittest.TestCase):
self.assertTrue([m for m in job.metadata if m.name=='myproperty' and m.value=='yale'])
# should add a metadata property with the template location
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
application_template_info=models.ApplicationTemplateInfo(self.static_apptemplate_path))
application_template_info=models.ApplicationTemplateInfo(file_path=self.static_apptemplate_path))
utils.expand_application_template(job, self._deserialize)
self.assertTrue(job.metadata)
self.assertTrue([m for m in job.metadata
if m.name=='az_batch:template_filepath' and m.value==self.static_apptemplate_path])
# should not allow the job to use a metadata property with our reserved prefix
job = models.ExtendedJobParameter("importantjob", None,
job = models.ExtendedJobParameter(id="importantjob", pool_info=None,
priority=500,
metadata=[models.MetadataItem('az_batch:property', 'something')],
metadata=[models.MetadataItem(name='az_batch:property', value='something')],
application_template_info=models.ApplicationTemplateInfo(
self.static_apptemplate_path))
file_path=self.static_apptemplate_path))
with self.assertRaises(ValueError) as ve:
utils.expand_application_template(job, self._deserialize)
@@ -1528,7 +1530,7 @@ class TestBatchExtensions(unittest.TestCase):
def add_collection(
job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config):
status = models.TaskAddStatus.success
response = models.TaskAddCollectionResult([models.TaskAddResult(status, s.id) for s in value])
response = models.TaskAddCollectionResult(value=[models.TaskAddResult(status=status, task_id=s.id) for s in value])
return response
num_calls = 7
@@ -1537,7 +1539,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST * num_calls):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id=("task" + str(i)), command_line="sleep 1"))
task_add_result = task_ops.add_collection("job", task_collection)
assert type(task_add_result) is models.TaskAddCollectionResult
assert set(result.task_id for result in task_add_result.value) == set(task.id for task in task_collection)
@@ -1547,7 +1549,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST * num_calls):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id=("task" + str(i)), command_line="sleep 1"))
task_add_result = task_ops.add_collection("job", task_collection, 4)
assert type(task_add_result) is models.TaskAddCollectionResult
assert set(result.task_id for result in task_add_result.value) == set(task.id for task in task_collection)
@@ -1565,7 +1567,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_ops.add_collection("job", task_collection)
assert task_ops._TaskWorkflowManager._bulk_add_tasks.call_count == 1
@@ -1577,7 +1579,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_ops.add_collection("job", task_collection, threads=4)
assert task_ops._TaskWorkflowManager._bulk_add_tasks.call_count == 1
@@ -1598,7 +1600,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST*num_calls):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_ops.add_collection("job", task_collection)
assert task_ops._TaskWorkflowManager._bulk_add_tasks.call_count == num_calls
@@ -1610,7 +1612,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = []
for i in range(task_ops.MAX_TASKS_PER_REQUEST*num_calls):
task_collection.append(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.append(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_ops.add_collection("job", task_collection, threads=4)
assert task_ops._TaskWorkflowManager._bulk_add_tasks.call_count == num_calls
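The call-count assertions above rely on add_collection splitting the submitted collection into batches of at most MAX_TASKS_PER_REQUEST tasks and invoking _bulk_add_tasks once per batch. A minimal sketch of that chunking (the constant's value of 100 is an assumption matching the Batch service's per-request cap):

MAX_TASKS_PER_REQUEST = 100  # assumed value; the actual constant lives on task_ops

def chunks(tasks, size=MAX_TASKS_PER_REQUEST):
    # Yield successive slices of at most `size` tasks.
    for i in range(0, len(tasks), size):
        yield tasks[i:i + size]

# 7 * MAX_TASKS_PER_REQUEST tasks -> 7 bulk calls, matching num_calls = 7.
assert len(list(chunks(list(range(7 * MAX_TASKS_PER_REQUEST))))) == 7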
@@ -1632,7 +1634,7 @@ class TestBatchExtensions(unittest.TestCase):
raise err
submitted_tasks.extendleft(value)
status = models.TaskAddStatus.success
response = models.TaskAddCollectionResult([models.TaskAddResult(status, s.id) for s in value])
response = models.TaskAddCollectionResult(value=[models.TaskAddResult(status=status, task_id=s.id) for s in value])
return response
with patch('azure.batch.operations.task_operations.TaskOperations.add_collection',
@@ -1640,7 +1642,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = collections.deque()
for i in range(task_ops.MAX_TASKS_PER_REQUEST):
task_collection.appendleft(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.appendleft(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_workflow_manager = task_ops._TaskWorkflowManager(
task_ops,
@@ -1666,7 +1668,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = collections.deque()
for i in range(1):
task_collection.appendleft(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.appendleft(models.TaskAddParameter(id="task" + str(i), command_line="sleep 1"))
task_workflow_manager = task_ops._TaskWorkflowManager(
task_ops,
"job",
@@ -1691,7 +1693,7 @@ class TestBatchExtensions(unittest.TestCase):
resource_files=[],
wait_for_success=True,
user_identity=base_sdk_models.UserIdentity())
job_release_task = base_sdk_models.JobReleaseTask("jobrelease")
job_release_task = base_sdk_models.JobReleaseTask(command_line="jobrelease")
multi_instance_settings = base_sdk_models.MultiInstanceSettings(
coordination_command_line="sleep 1")
output_file = base_sdk_models.OutputFile(
@@ -1751,14 +1753,14 @@ class TestBatchExtensions(unittest.TestCase):
results = []
for task in value:
error = BatchError(code="testError", message="test error")
result = TaskAddResult(TaskAddStatus.client_error, task.id, error=error)
result = TaskAddResult(status=TaskAddStatus.client_error, task_id=task.id, error=error)
results.append(result)
collection = TaskAddCollectionResult()
collection.value = results
return collection
submitted_tasks.extendleft(value)
status = models.TaskAddStatus.success
response = models.TaskAddCollectionResult([models.TaskAddResult(status, s.id) for s in value])
response = models.TaskAddCollectionResult(value=[models.TaskAddResult(status=status, task_id=s.id) for s in value])
return response
with patch('azure.batch.operations.task_operations.TaskOperations.add_collection',
@@ -1766,7 +1768,7 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = collections.deque()
for i in range(task_ops.MAX_TASKS_PER_REQUEST):
task_collection.appendleft(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.appendleft(models.TaskAddParameter(id=("task" + str(i)), command_line="sleep 1"))
try:
task_ops.add_collection("job", task_collection)
@@ -1788,14 +1790,15 @@ class TestBatchExtensions(unittest.TestCase):
results = []
for task in value:
error = BatchError(code="testError", message="test error")
result = TaskAddResult(TaskAddStatus.server_error, task.id, error=error)
result = TaskAddResult(status=TaskAddStatus.server_error, task_id=task.id, error=error)
results.append(result)
collection = TaskAddCollectionResult()
collection.value = results
return collection
submitted_tasks.extendleft(value)
status = models.TaskAddStatus.success
response = models.TaskAddCollectionResult([models.TaskAddResult(status, s.id) for s in value])
response = models.TaskAddCollectionResult(
value=[models.TaskAddResult(status=status, task_id=s.id) for s in value])
return response
with patch('azure.batch.operations.task_operations.TaskOperations.add_collection',
@@ -1803,12 +1806,12 @@ class TestBatchExtensions(unittest.TestCase):
task_ops = operations.ExtendedTaskOperations(None, None, None, self._serialize, self._deserialize, None)
task_collection = collections.deque()
for i in range(task_ops.MAX_TASKS_PER_REQUEST):
task_collection.appendleft(models.TaskAddParameter("task" + str(i), "sleep 1"))
task_collection.appendleft(models.TaskAddParameter(id=("task" + str(i)), command_line="sleep 1"))
try:
task_add_result = task_ops.add_collection("job", task_collection)
task_add_result = task_ops.add_collection(job_id="job", value=task_collection)
assert set(result.task_id for result in task_add_result.value) == set(task.id for task in task_collection)
except CreateTasksErrorException as e:
self.fail()
self.fail(e)
except Exception as e:
self.fail()
self.fail(e)

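The last two tests pin down how partial failures are triaged during bulk submission: client errors are collected and surfaced to the caller as CreateTasksErrorException, while server errors are re-queued and retried. A minimal sketch of that split (names hypothetical; the real code compares TaskAddStatus enum members):

from collections import namedtuple

Result = namedtuple('Result', ['task_id', 'status'])  # stand-in for TaskAddResult

def triage(results, retry_queue, client_errors):
    for r in results:
        if r.status == 'servererror':
            retry_queue.append(r.task_id)   # transient: resubmit later
        elif r.status == 'clienterror':
            client_errors.append(r)         # permanent: raised when done

retries, errors = [], []
triage([Result('task0', 'servererror'), Result('task1', 'clienterror')],
       retries, errors)
assert retries == ['task0'] and len(errors) == 1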
View file

@@ -12,6 +12,7 @@ from azext.batch.models import BatchErrorException, AllocationState, ComputeNode
import azure.batch.batch_auth as batchauth
import azext.batch as batch
from tests.vcr_test_base import VCRTestBase
from azure.common import AzureMissingResourceHttpError
from azure.storage.common import CloudStorageAccount
from azure.storage.blob import BlobPermissions
@@ -24,15 +25,19 @@ class TestFileUpload(VCRTestBase):
self.account_endpoint = 'https://test1.westus.batch.azure.com/'
else:
self.account_name = os.environ.get('AZURE_BATCH_ACCOUNT', 'test1')
self.resource_name = os.environ.get('AZURE_BATCH_RESORCE_GROUP', 'test_rg')
self.resource_name = os.environ.get('AZURE_BATCH_RESOURCE_GROUP', 'test_rg')
self.account_endpoint = os.environ.get('AZURE_BATCH_ENDPOINT', 'https://test1.westus.batch.azure.com/')
self.testPrefix = 'cli-batch-extensions-live-tests'
def cmd(self, command, checks=None, allowed_exceptions=None,
debug=False):
command = '{} --resource-group {} --account-name {} --account-endpoint {}'.\
format(command, self.resource_name, self.account_name, self.account_endpoint)
return super(TestFileUpload, self).cmd(command, checks, allowed_exceptions, debug)
command = '{} --resource-group {} --account-name {} --account-endpoint {}'.format(
command,
self.resource_name,
self.account_name,
self.account_endpoint)
return super(TestFileUpload, self).cmd(command=command, checks=checks,
allowed_exceptions=allowed_exceptions, debug=debug)
def test_batch_upload_live(self):
self.execute()
@@ -40,12 +45,12 @@ class TestFileUpload(VCRTestBase):
def body(self):
# should upload a local file to auto-storage
input_str = os.path.join(os.path.dirname(__file__), 'data', 'file_tests', 'foo.txt')
result = self.cmd('batch file upload --local-path "{}" --file-group {}'.
result = self.cmd(command=r'batch file upload --local-path "{}" --file-group {}'.
format(input_str, self.testPrefix))
print('Result text:{}'.format(result))
# should upload a local file to auto-storage with path prefix
result = self.cmd('batch file upload --local-path "{}" --file-group {} '
result = self.cmd(command=r'batch file upload --local-path "{}" --file-group {} '
'--remote-path "test/data"'.format(input_str, self.testPrefix))
print('Result text:{}'.format(result))
@@ -64,8 +69,8 @@ class TestBatchExtensionsLive(VCRTestBase):
self.account_name = os.environ.get('AZURE_BATCH_ACCOUNT', 'test1')
self.account_endpoint = os.environ.get('AZURE_BATCH_ENDPOINT', 'https://test1.westus.batch.azure.com/')
self.account_key = os.environ['AZURE_BATCH_ACCESS_KEY']
storage_account = os.environ.get('AZURE_STORAGE_ACCOUNT', 'testaccountforbatch')
storage_key = os.environ.get('AZURE_STORAGE_ACCESS_KEY', 'ZmFrZV9hY29jdW50X2tleQ==')
self.blob_client = CloudStorageAccount(storage_account, storage_key)\
.create_block_blob_service()
@@ -74,7 +79,7 @@ class TestBatchExtensionsLive(VCRTestBase):
self.output_blob_container = 'aaatestcontainer'
sas_token = self.blob_client.generate_container_shared_access_signature(
self.output_blob_container,
container_name=self.output_blob_container,
permission=BlobPermissions(read=True, write=True),
start=datetime.datetime.utcnow(),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=1))
@@ -87,8 +92,8 @@ class TestBatchExtensionsLive(VCRTestBase):
def cmd(self, command, checks=None, allowed_exceptions=None,
debug=False):
command = '{} --account-name {} --account-key "{}" --account-endpoint {}'.\
format(command, self.account_name, self.account_key, self.account_endpoint)
command = r'{} --account-name {} --account-key "{}" --account-endpoint {}'.format(
command,
self.account_name,
self.account_key,
self.account_endpoint)
return super(TestBatchExtensionsLive, self).cmd(command, checks, allowed_exceptions, debug)
def test_batch_extensions_live(self):
@@ -96,7 +104,7 @@ class TestBatchExtensionsLive(VCRTestBase):
def submit_job_wrapper(self, file_name):
try:
result = self.cmd('batch job create --template "{}"'.format(file_name))
result = self.cmd(r'batch job create --template "{}"'.format(file_name))
except Exception as exp:
result = exp
print('Result text:{}'.format(result))
@@ -164,11 +172,14 @@ class TestBatchExtensionsLive(VCRTestBase):
time.sleep(wait_for)
def clear_container(self, container_name):
print('clearing container {}'.format(container_name))
blobs = self.blob_client.list_blobs(container_name)
blobs = [b.name for b in blobs]
for blob in blobs:
self.blob_client.delete_blob(container_name, blob)
try:
print('clearing container {}'.format(container_name))
blobs = self.blob_client.list_blobs(container_name)
blobs = [b.name for b in blobs]
for blob in blobs:
self.blob_client.delete_blob(container_name, blob)
except AzureMissingResourceHttpError:
pass
def create_basic_spec(self, job_id, pool_id, task_id, text, is_windows): # pylint: disable=too-many-arguments
cmd_line = None
@@ -382,7 +393,7 @@ class TestBatchExtensionsLive(VCRTestBase):
self.assertTrue(stdout_blob.properties.content_length>=4)
finally:
print('Deleting job {}'.format(job_id))
self.batch_client.job.delete(job_id)
self.batch_client.job.delete(job_id=job_id)
def body(self):
# file egress should work on ubuntu 14.04