# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

# compat imports
from __future__ import (
    absolute_import, division, print_function, unicode_literals
)
from builtins import (  # noqa
    bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
    next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import collections
import datetime
try:
    import pathlib2 as pathlib
except ImportError:
    import pathlib
import re
# non-stdlib imports
import azure.batch.models as batchmodels
import dateutil.parser
# local imports
from . import task_factory
from . import util

# global defines
_METADATA_VERSION_NAME = 'batch_shipyard_version'
_GLUSTER_DEFAULT_VOLNAME = 'gv0'
_GLUSTER_ON_COMPUTE_VOLUME = 'gluster_on_compute/{}'.format(
    _GLUSTER_DEFAULT_VOLNAME)
_HOST_MOUNTS_DIR = '$AZ_BATCH_NODE_ROOT_DIR/mounts'
_HOST_MOUNTS_DIR_WINDOWS = '%AZ_BATCH_NODE_ROOT_DIR%\\mounts'
_TENSORBOARD_DOCKER_IMAGE = (
    'gcr.io/tensorflow/tensorflow',
    '/usr/local/lib/python2.7/dist-packages/tensorboard/main.py',
    6006
)
_GPU_CC37_INSTANCES = re.compile(
    # standard nc
    r'^standard_nc[\d]+r?(_promo)?$',
    re.IGNORECASE
)
_GPU_COMPUTE_INSTANCES = re.compile(
    # standard nc, ncv2, ncv3, nd, ndv2
    r'^standard_n[cd][\d]+r?s?(_v[\d])?(_promo)?$',
    re.IGNORECASE
)
_GPU_VISUALIZATION_INSTANCES = re.compile(
    # standard nv, nvv2
    r'^standard_nv[\d]+s?(_v2)?(_promo)?$',
    re.IGNORECASE
)
_SRIOV_RDMA_INSTANCES = re.compile(
    # standard hb/hc
    r'^standard_((hb|hc)[\d]+m?rs?(_v[\d])?)$',
    re.IGNORECASE
)
_SRIOV_RDMA_TRANSITION_INSTANCES = re.compile(
    # standard nc+r_v3
    r'^standard_(nc[\d]+rs_v3)$',
    re.IGNORECASE
)
_NETWORKDIRECT_RDMA_INSTANCES = re.compile(
    # standard a8/a9, h+r, nc+r, nd+r
    r'^standard_((a8|a9)|((h|nc|nd)[\d]+m?rs?(_v[1-3])?))(_promo)?$',
    re.IGNORECASE
)
_PREMIUM_STORAGE_INSTANCES = re.compile(
    r'^standard_(([a-z]+[\d]+.*s(_v[\d])?)|([dg]s[\d]+(_v2)?))$',
    re.IGNORECASE
)
_NESTED_VIRTUALIZATION_INSTANCES = re.compile(
    # standard dv3/ev3, fv2, m
    r'^standard_(([de][\d]+s?_v3)|(f[\d]+s_v2)|(m[\d]+[lmst]*))$',
    re.IGNORECASE
)
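# Illustrative (non-exhaustive) sample matches for the vm size patterns
# above; the SKU names are examples for reference only:
#   'standard_nc6'        -> _GPU_CC37_INSTANCES and _GPU_COMPUTE_INSTANCES
#   'standard_nv12'       -> _GPU_VISUALIZATION_INSTANCES
#   'standard_hb60rs'     -> _SRIOV_RDMA_INSTANCES
#   'standard_nc24rs_v3'  -> _SRIOV_RDMA_TRANSITION_INSTANCES (also matches
#                            _NETWORKDIRECT_RDMA_INSTANCES by pattern; see
#                            is_networkdirect_rdma_pool below for resolution)
#   'standard_ds3_v2'     -> _PREMIUM_STORAGE_INSTANCES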
_VM_TCP_NO_TUNE = frozenset((
    # basic
    'basic_a0', 'basic_a1', 'basic_a2', 'basic_a3', 'basic_a4',
    # standard_a
    'standard_a0', 'standard_a1', 'standard_a2', 'standard_a3', 'standard_a5',
    'standard_a6',
    # standard_a_v2
    'standard_a1_v2', 'standard_a2_v2', 'standard_a4_v2', 'standard_a2m_v2',
    'standard_a4m_v2',
    # standard_d
    'standard_d1', 'standard_ds1', 'standard_d2', 'standard_ds2',
    # standard_d_v2
    'standard_d1_v2', 'standard_ds1_v2',
    # standard_d_v3
    'standard_d2_v3', 'standard_d2s_v3',
    # standard_e_v3
    'standard_e2_v3', 'standard_e2s_v3',
    # standard_f
    'standard_f1', 'standard_f1s',
    # standard_f_v2
    'standard_f2s_v2',
    # standard_b
    'standard_b1s', 'standard_b1ms', 'standard_b2s', 'standard_b2ms',
    'standard_b4ms', 'standard_b8ms',
))
_VM_GPU_COUNT = {
    1: re.compile(r'^standard_n[cdv]6r?s?(_v[\d])?(_promo)?$', re.IGNORECASE),
    2: re.compile(r'^standard_n[cdv]12r?s?(_v[\d])?(_promo)?$', re.IGNORECASE),
    4: re.compile(r'^standard_n[cdv]24r?s?(_v[\d])?(_promo)?$', re.IGNORECASE),
    8: re.compile(r'^standard_nd40s_v2$', re.IGNORECASE),
}
_VM_GPU_CLASS = {
    'tesla_k80': re.compile(r'^standard_n[c][\d]+r?(_promo)?$', re.IGNORECASE),
    'tesla_p40': re.compile(r'^standard_n[d][\d]+r?s?$', re.IGNORECASE),
    'tesla_p100': re.compile(r'^standard_n[c][\d]+r?s_v2$', re.IGNORECASE),
    'tesla_v100': re.compile(
        r'^standard_n(([c][\d]+r?s_v3)|(d40s_v2))$', re.IGNORECASE),
    'tesla_m60': re.compile(
        r'^standard_nv[\d]+s?(_v2)?(_promo)?$', re.IGNORECASE),
}
_VM_IB_CLASS = {
    'qdr_ib': re.compile(r'^standard_(a8|a9)$', re.IGNORECASE),
    'fdr_ib': re.compile(
        r'^standard_(((h|nc|nd)+[\d]+m?rs?(_v[1-3])?))(_promo)?$',
        re.IGNORECASE),
    'edr_ib': re.compile(r'^standard_(hc|hb)+[\d]+rs$', re.IGNORECASE),
}
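# Illustrative lookups against the maps above (sample vm sizes only), as
# resolved by the helper functions defined later in this module:
#   get_num_gpus_from_vm_size('standard_nc24r')  -> 4
#   get_gpu_class_from_vm_size('standard_nc24r') -> 'tesla_k80'
#   get_ib_class_from_vm_size('standard_hb60rs') -> 'edr_ib'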
_VALID_PUBLISHERS = frozenset((
    'canonical', 'credativ', 'microsoft-azure-batch',
    'microsoftwindowsserver', 'openlogic'
))
_SINGULARITY_COMMANDS = frozenset(('exec', 'run'))
_FORBIDDEN_MERGE_TASK_PROPERTIES = frozenset((
    'depends_on', 'depends_on_range', 'multi_instance', 'task_factory'
))
# named tuples
PoolVmCountSettings = collections.namedtuple(
    'PoolVmCountSettings', [
        'dedicated',
        'low_priority',
    ]
)
PoolVmPlatformImageSettings = collections.namedtuple(
    'PoolVmPlatformImageSettings', [
        'publisher',
        'offer',
        'sku',
        'version',
        'native',
        'license_type',
    ]
)
PoolVmCustomImageSettings = collections.namedtuple(
    'PoolVmCustomImageSettings', [
        'arm_image_id',
        'node_agent',
        'native',
        'license_type',
    ]
)
PoolAutoscaleScenarioSettings = collections.namedtuple(
    'PoolAutoscaleScenarioSettings', [
        'name',
        'maximum_vm_count',
        'maximum_vm_increment_per_evaluation',
        'node_deallocation_option',
        'sample_lookback_interval',
        'required_sample_percentage',
        'rebalance_preemption_percentage',
        'bias_last_sample',
        'bias_node_type',
        'weekday_start',
        'weekday_end',
        'workhour_start',
        'workhour_end',
    ]
)
PoolAutoscaleSettings = collections.namedtuple(
    'PoolAutoscaleSettings', [
        'evaluation_interval',
        'formula',
        'scenario',
    ]
)
PoolAutopoolSettings = collections.namedtuple(
    'PoolAutopoolSettings', [
        'pool_lifetime',
        'keep_alive',
    ]
)
PrometheusSettings = collections.namedtuple(
    'PrometheusSettings', [
        'ne_enabled', 'ne_port', 'ne_options', 'ca_enabled', 'ca_port',
        'ca_options',
    ]
)
AdditionalNodePrepSettings = collections.namedtuple(
    'AdditionalNodePrepSettings', [
        'commands_pre',
        'commands_post',
        'environment_variables',
        'environment_variables_keyvault_secret_id',
    ]
)
PoolSettings = collections.namedtuple(
    'PoolSettings', [
        'id', 'vm_size', 'vm_count', 'resize_timeout', 'max_tasks_per_node',
        'inter_node_communication_enabled', 'vm_configuration',
        'reboot_on_start_task_failed', 'attempt_recovery_on_unusable',
        'block_until_all_global_resources_loaded',
        'transfer_files_on_pool_creation', 'input_data', 'resource_files',
        'gpu_driver', 'ssh', 'rdp', 'additional_node_prep', 'virtual_network',
        'autoscale', 'node_fill_type', 'remote_access_control',
        'certificates', 'prometheus', 'upload_diagnostics_logs_on_unusable',
        'container_runtimes_install', 'container_runtimes_default',
        'per_job_auto_scratch', 'batch_insights_enabled', 'public_ips',
    ]
)
SSHSettings = collections.namedtuple(
    'SSHSettings', [
        'username', 'expiry_days', 'ssh_public_key', 'ssh_public_key_data',
        'ssh_private_key', 'generate_docker_tunnel_script',
        'generated_file_export_path', 'hpn_server_swap',
        'allow_docker_access',
    ]
)
RDPSettings = collections.namedtuple(
    'RDPSettings', [
        'username', 'expiry_days', 'password',
    ]
)
RemoteAccessControl = collections.namedtuple(
    'RemoteAccessControl', [
        'starting_port', 'backend_port', 'protocol', 'allow', 'deny',
    ]
)
AADSettings = collections.namedtuple(
    'AADSettings', [
        'directory_id', 'application_id', 'auth_key', 'rsa_private_key_pem',
        'x509_cert_sha1_thumbprint', 'user', 'password', 'endpoint',
        'token_cache_file', 'authority_url',
    ]
)
KeyVaultCredentialsSettings = collections.namedtuple(
    'KeyVaultCredentialsSettings', [
        'aad', 'keyvault_uri', 'keyvault_credentials_secret_id',
    ]
)
ManagementCredentialsSettings = collections.namedtuple(
    'ManagementCredentialsSettings', [
        'aad', 'subscription_id',
    ]
)
BatchCredentialsSettings = collections.namedtuple(
    'BatchCredentialsSettings', [
        'aad', 'account', 'account_key', 'account_service_url',
        'resource_group', 'subscription_id', 'location',
        'app_insights_instrumentation_key',
        'app_insights_application_id',
    ]
)
StorageCredentialsSettings = collections.namedtuple(
    'StorageCredentialsSettings', [
        'account', 'account_key', 'endpoint', 'resource_group',
    ]
)
BatchShipyardSettings = collections.namedtuple(
    'BatchShipyardSettings', [
        'storage_account_settings', 'storage_entity_prefix',
        'generated_sas_expiry_days', 'use_shipyard_docker_image',
        'store_timing_metrics', 'fallback_registry',
        'delay_docker_image_preload',
    ]
)
DataReplicationSettings = collections.namedtuple(
    'DataReplicationSettings', [
        'concurrent_source_downloads',
    ]
)
SourceSettings = collections.namedtuple(
    'SourceSettings', [
        'path', 'include', 'exclude'
    ]
)
DestinationSettings = collections.namedtuple(
    'DestinationSettings', [
        'storage_account_settings', 'shared_data_volume',
        'relative_destination_path', 'data_transfer'
    ]
)
DataTransferSettings = collections.namedtuple(
    'DataTransferSettings', [
        'method', 'ssh_private_key', 'scp_ssh_extra_options',
        'rsync_extra_options', 'split_files_megabytes',
        'max_parallel_transfers_per_node', 'is_file_share',
        'remote_path', 'blobxfer_extra_options',
    ]
)
JobScheduleSettings = collections.namedtuple(
    'JobScheduleSettings', [
        'do_not_run_until', 'do_not_run_after', 'start_window',
        'recurrence_interval',
    ]
)
JobManagerSettings = collections.namedtuple(
    'JobManagerSettings', [
        'allow_low_priority_node', 'run_exclusive', 'monitor_task_completion',
    ]
)
JobRecurrenceSettings = collections.namedtuple(
    'JobRecurrenceSettings', [
        'schedule', 'job_manager',
    ]
)
UserIdentitySettings = collections.namedtuple(
    'UserIdentitySettings', [
        'default_pool_admin', 'specific_user_uid', 'specific_user_gid',
    ]
)
TaskFactoryStorageSettings = collections.namedtuple(
    'TaskFactoryStorageSettings', [
        'storage_settings', 'storage_link_name', 'container', 'remote_path',
        'is_file_share', 'include', 'exclude',
    ]
)
TaskExitOptions = collections.namedtuple(
    'TaskExitOptions', [
        'job_action', 'dependency_action',
    ]
)
TaskSettings = collections.namedtuple(
    'TaskSettings', [
        'id', 'docker_image', 'singularity_image', 'name', 'run_options',
        'docker_exec_options', 'singularity_cmd', 'run_elevated',
        'environment_variables', 'environment_variables_keyvault_secret_id',
        'envfile', 'resource_files', 'command', 'infiniband', 'gpu',
        'depends_on', 'depends_on_range', 'max_task_retries', 'max_wall_time',
        'retention_time', 'multi_instance', 'default_exit_options',
        'working_dir',
    ]
)
MultiInstanceSettings = collections.namedtuple(
    'MultiInstanceSettings', [
        'num_instances', 'coordination_command', 'resource_files',
        'pre_execution_command', 'mpi',
    ]
)
MpiSettings = collections.namedtuple(
    'MpiSettings', [
        'runtime', 'executable_path', 'options', 'processes_per_node',
    ]
)
ResourceFileSettings = collections.namedtuple(
    'ResourceFileSettings', [
        'file_path', 'blob_source', 'file_mode',
    ]
)
CustomMountFstabSettings = collections.namedtuple(
    'CustomMountFstabSettings', [
        'fs_spec', 'fs_vfstype', 'fs_mntops', 'fs_freq', 'fs_passno',
    ]
)
FederationPoolConstraintSettings = collections.namedtuple(
    'FederationPoolConstraintSettings', [
        'native', 'windows', 'location', 'custom_image_arm_id',
        'virtual_network_arm_id', 'low_priority_nodes_allow',
        'low_priority_nodes_exclusive', 'autoscale_allow',
        'autoscale_exclusive', 'container_registries_private_docker_hub',
        'container_registries_public', 'max_active_task_backlog_ratio',
        'max_active_task_backlog_autoscale_exempt',
    ]
)
FederationComputeNodeConstraintSettings = collections.namedtuple(
    'FederationComputeNodeConstraintSettings', [
        'vm_size', 'cores', 'core_variance', 'memory', 'memory_variance',
        'exclusive', 'gpu', 'infiniband',
    ]
)
FederationConstraintSettings = collections.namedtuple(
    'FederationConstraintSettings', [
        'pool', 'compute_node',
    ]
)
ManagedDisksSettings = collections.namedtuple(
    'ManagedDisksSettings', [
        'location', 'resource_group', 'zone', 'sku', 'disk_size_gb',
        'disk_provisioned_perf_iops_rw', 'disk_provisioned_perf_mbps_rw',
        'disk_names',
    ]
)
VirtualNetworkSettings = collections.namedtuple(
    'VirtualNetworkSettings', [
        'arm_subnet_id', 'name', 'resource_group', 'address_space',
        'subnet_name', 'subnet_address_prefix', 'existing_ok',
        'create_nonexistant',
    ]
)
SambaAccountSettings = collections.namedtuple(
    'SambaAccountSettings', [
        'username', 'password', 'uid', 'gid',
    ]
)
SambaSettings = collections.namedtuple(
    'SambaSettings', [
        'share_name', 'account', 'read_only', 'create_mask',
        'directory_mask',
    ]
)
FileServerSettings = collections.namedtuple(
    'FileServerSettings', [
        'type', 'mountpoint', 'mount_options', 'server_options', 'samba',
    ]
)
InboundNetworkSecurityRule = collections.namedtuple(
    'InboundNetworkSecurityRule', [
        'destination_port_range', 'source_address_prefix', 'protocol',
    ]
)
NetworkSecuritySettings = collections.namedtuple(
    'NetworkSecuritySettings', [
        'inbound',
    ]
)
MappedVmDiskSettings = collections.namedtuple(
    'MappedVmDiskSettings', [
        'disk_array', 'filesystem', 'raid_level',
    ]
)
PublicIpSettings = collections.namedtuple(
    'PublicIpSettings', [
        'enabled', 'static',
    ]
)
RemoteFsSettings = collections.namedtuple(
    'RemoteFsSettings', [
        'managed_disks', 'storage_cluster',
    ]
)
PrometheusMonitoringSettings = collections.namedtuple(
    'PrometheusMonitoringSettings', [
        'port', 'scrape_interval',
    ]
)
GrafanaMonitoringSettings = collections.namedtuple(
    'GrafanaMonitoringSettings', [
        'admin_user', 'admin_password', 'additional_dashboards',
    ]
)
MonitoringServicesSettings = collections.namedtuple(
    'MonitoringServicesSettings', [
        'resource_polling_interval', 'lets_encrypt_enabled',
        'lets_encrypt_staging', 'prometheus', 'grafana',
    ]
)
MonitoringVmSettings = collections.namedtuple(
    'MonitoringVmSettings', [
        'resource_group', 'virtual_network', 'network_security',
        'vm_size', 'public_ip', 'hostname_prefix', 'ssh',
        'accelerated_networking',
    ]
)
FederationProxyOptionsSettings = collections.namedtuple(
    'FederationProxyOptionsSettings', [
        'federations_polling_interval', 'actions_polling_interval',
        'log_persistence', 'log_level', 'log_filename',
        'scheduling_after_success_blackout_interval',
        'scheduling_after_success_evaluate_autoscale',
    ]
)
SlurmBatchPoolSettings = collections.namedtuple(
    'SlurmBatchPoolSettings', [
        'batch_service_url', 'compute_node_type', 'max_compute_nodes',
        'weight', 'features', 'reclaim_exclude_num_nodes',
    ]
)
SlurmPartitionSettings = collections.namedtuple(
    'SlurmPartitionSettings', [
        'batch_pools', 'max_runtime_limit', 'default', 'preempt_type',
        'preempt_mode', 'over_subscribe', 'priority_tier', 'other_options',
    ]
)
SlurmUnmanagedPartitionSettings = collections.namedtuple(
    'SlurmUnmanagedPartitionSettings', [
        'partition', 'nodes',
    ]
)
SlurmOptionsSettings = collections.namedtuple(
    'SlurmOptionsSettings', [
        'cluster_id', 'idle_reclaim_time', 'max_nodes', 'elastic_partitions',
        'unmanaged_partitions',
    ]
)
SlurmSharedDataVolumesSettings = collections.namedtuple(
    'SlurmSharedDataVolumesSettings', [
        'id', 'host_mount_path', 'store_slurmctld_state',
    ]
)
SlurmCredentialsSettings = collections.namedtuple(
    'SlurmCredentialsSettings', [
        'db_password',
    ]
)
SingularityImageSettings = collections.namedtuple(
    'SingularityImageSettings', [
        'image', 'key_fingerprint', 'key_file',
        'encryption_certificate_sha1_thumbprint'
    ]
)
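# The namedtuples above are plain value containers populated by the settings
# readers later in this module; for example (sample values only):
#   PoolVmCountSettings(dedicated=4, low_priority=0)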


class VmResource(object):
    def __init__(
            self, location, resource_group, hostname_prefix, vm_size,
            public_ip, virtual_network, network_security, ssh,
            accelerated_networking, zone):
        # type: (VmResource, str, str, str, str, PublicIpSettings,
        #        VirtualNetworkSettings, NetworkSecuritySettings, SSHSettings,
        #        bool, int) -> None
        if location is None or ' ' in location:
            raise ValueError('invalid location specified')
        self.location = location
        self.resource_group = resource_group
        self.hostname_prefix = hostname_prefix
        self.vm_size = vm_size
        self.public_ip = public_ip
        self.virtual_network = virtual_network
        self.network_security = network_security
        self.ssh = ssh
        self.accelerated_networking = accelerated_networking
        self.zone = zone


class StorageClusterSettings(VmResource):
    def __init__(
            self, id, file_server, vm_count, fault_domains, vm_disk_map,
            location, resource_group, hostname_prefix, vm_size,
            public_ip, virtual_network, network_security, ssh,
            accelerated_networking, zone, prometheus):
        # type: (StorageClusterSettings, str, FileServerSettings, int, int,
        #        Dict, str, str, str, str, PublicIpSettings,
        #        VirtualNetworkSettings, NetworkSecuritySettings, SSHSettings,
        #        bool, int, PrometheusSettings) -> None
        super(StorageClusterSettings, self).__init__(
            location, resource_group, hostname_prefix, vm_size, public_ip,
            virtual_network, network_security, ssh, accelerated_networking,
            zone)
        self.id = id
        self.file_server = file_server
        self.vm_count = vm_count
        self.fault_domains = fault_domains
        self.vm_disk_map = vm_disk_map
        self.prometheus = prometheus


def _kv_read_checked(conf, key, default=None):
    # type: (dict, str, obj) -> obj
    """Read a key as some value with a check against None and length
    :param dict conf: configuration dict
    :param str key: conf key
    :param obj default: default to assign
    :rtype: obj or None
    :return: value of key
    """
    try:
        ret = conf[key]
        if util.is_none_or_empty(ret):
            raise KeyError()
    except KeyError:
        ret = default
    return ret


def _kv_read(conf, key, default=None):
    # type: (dict, str, obj) -> obj
    """Read a key as some value
    :param dict conf: configuration dict
    :param str key: conf key
    :param obj default: default to assign
    :rtype: obj or None
    :return: value of key
    """
    try:
        ret = conf[key]
    except KeyError:
        ret = default
    return ret
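

# Illustrative behavior of the two read helpers above (sample values only,
# assuming util.is_none_or_empty treats empty strings/collections as empty):
#   _kv_read_checked({'key': ''}, 'key', default='fallback') -> 'fallback'
#   _kv_read({'key': ''}, 'key', default='fallback')         -> ''
#   _kv_read_checked({}, 'key')                              -> None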


def get_metadata_version_name():
    # type: (None) -> str
    """Get metadata version name
    :rtype: str
    :return: metadata version name
    """
    return _METADATA_VERSION_NAME


def get_valid_publishers():
    # type: (None) -> frozenset
    """Get valid publishers
    :rtype: frozenset
    :return: publisher set
    """
    return _VALID_PUBLISHERS


def get_tensorboard_docker_image():
    # type: (None) -> Tuple[str, str, int]
    """Get tensorboard docker image
    :rtype: tuple
    :return: (tensorboard docker image,
        absolute path to tensorboard.py, container port)
    """
    return _TENSORBOARD_DOCKER_IMAGE


def get_gluster_default_volume_name():
    # type: (None) -> str
    """Get gluster default volume name
    :rtype: str
    :return: gluster default volume name
    """
    return _GLUSTER_DEFAULT_VOLNAME


def get_gluster_on_compute_volume():
    # type: (None) -> str
    """Get gluster on compute volume mount suffix
    :rtype: str
    :return: gluster on compute volume mount
    """
    return _GLUSTER_ON_COMPUTE_VOLUME


def get_host_mounts_path(is_windows):
    # type: (bool) -> str
    """Get host mounts path
    :param bool is_windows: is windows pool
    :rtype: str
    :return: host mounts dir
    """
    return _HOST_MOUNTS_DIR_WINDOWS if is_windows else _HOST_MOUNTS_DIR


def get_singularity_tmpdir(config):
    # type: (dict) -> str
    """Get Singularity tmpdir var
    :param dict config: configuration dict
    :rtype: str
    :return: singularity tmpdir
    """
    if is_windows_pool(config):
        sep = '\\'
    else:
        sep = '/'
    return sep.join((temp_disk_mountpoint(config), 'singularity', 'tmp'))


def get_singularity_cachedir(config):
    # type: (dict) -> str
    """Get Singularity cachedir var
    :param dict config: configuration dict
    :rtype: str
    :return: singularity cachedir
    """
    if is_windows_pool(config):
        sep = '\\'
    else:
        sep = '/'
    return sep.join((temp_disk_mountpoint(config), 'singularity', 'cache'))


def get_singularity_sypgpdir(config):
    # type: (dict) -> str
    """Get Singularity sypgpdir var
    :param dict config: configuration dict
    :rtype: str
    :return: singularity sypgpdir
    """
    if is_windows_pool(config):
        sep = '\\'
    else:
        sep = '/'
    return sep.join((temp_disk_mountpoint(config), 'singularity', 'sypgp'))


def can_tune_tcp(vm_size):
    # type: (str) -> bool
    """Check if TCP tuning on compute node should be performed
    :param str vm_size: vm size
    :rtype: bool
    :return: True if VM should be tuned
    """
    if vm_size.lower() in _VM_TCP_NO_TUNE:
        return False
    return True


def is_gpu_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is GPU capable
    :param str vm_size: vm size
    :rtype: bool
    :return: if gpus are present
    """
    return (
        is_gpu_compute_pool(vm_size) or is_gpu_visualization_pool(vm_size)
    )


def is_gpu_compute_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is for GPU compute
    :param str vm_size: vm size
    :rtype: bool
    :return: if compute gpus are present
    """
    return _GPU_COMPUTE_INSTANCES.match(vm_size) is not None


def is_gpu_visualization_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is for GPU visualization
    :param str vm_size: vm size
    :rtype: bool
    :return: if visualization gpus are present
    """
    return _GPU_VISUALIZATION_INSTANCES.match(vm_size) is not None


def get_gpu_type_from_vm_size(vm_size):
    # type: (str) -> str
    """Get GPU type as string
    :param str vm_size: vm size
    :rtype: str
    :return: type of gpu and compute capability
    """
    if is_gpu_compute_pool(vm_size):
        if _GPU_CC37_INSTANCES.match(vm_size):
            return 'compute_cc37'
        else:
            return 'compute_cc6-7'
    elif is_gpu_visualization_pool(vm_size):
        return 'viz_cc52'
    else:
        return None
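

# Illustrative GPU type resolution for the function above (sample vm sizes
# only):
#   get_gpu_type_from_vm_size('standard_nc6')     -> 'compute_cc37'
#   get_gpu_type_from_vm_size('standard_nc6s_v2') -> 'compute_cc6-7'
#   get_gpu_type_from_vm_size('standard_nv6')     -> 'viz_cc52'
#   get_gpu_type_from_vm_size('standard_d2_v3')   -> None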


def get_num_gpus_from_vm_size(vm_size):
    # type: (str) -> int
    """Get number of GPUs from VM size
    :param str vm_size: vm size
    :rtype: int
    :return: number of GPUs
    """
    for vm in _VM_GPU_COUNT:
        if _VM_GPU_COUNT[vm].match(vm_size):
            return vm
    raise RuntimeError('vm_size {} has no mapping to number of GPUs'.format(
        vm_size))


def get_gpu_class_from_vm_size(vm_size):
    # type: (str) -> str
    """Get GPU class from VM size
    :param str vm_size: vm size
    :rtype: str
    :return: GPU class
    """
    for c in _VM_GPU_CLASS:
        if _VM_GPU_CLASS[c].match(vm_size):
            return c
    raise RuntimeError('vm_size {} has no mapping to GPU class'.format(
        vm_size))


def gpu_configuration_check(config, vm_size=None):
    # type: (dict, str) -> bool
    """Check if OS is allowed with a GPU VM
    :param dict config: configuration dict
    :param str vm_size: vm size
    :rtype: bool
    :return: if configuration is allowed
    """
    # if this is not a gpu sku, always allow
    if util.is_none_or_empty(vm_size):
        vm_size = _pool_vm_size(config)
    if not is_gpu_pool(vm_size):
        return True
    # always allow gpu with custom images
    node_agent = pool_custom_image_node_agent(config)
    if util.is_not_empty(node_agent):
        return True
    # check for platform image support
    publisher = pool_publisher(config, lower=True)
    offer = pool_offer(config, lower=True)
    sku = pool_sku(config, lower=True)
    if publisher == 'microsoft-azure-batch':
        return True
    elif (publisher == 'canonical' and offer == 'ubuntuserver' and
            sku > '16.04'):
        return True
    elif publisher == 'openlogic':
        if offer == 'centos-hpc' and sku >= '7.3':
            return True
        elif offer == 'centos' and sku >= '7.3':
            return True
    return False


def is_lis_install_required(config, vm_size=None):
    # type: (dict, str) -> bool
    """Check if the pool requires installing LIS
    :param dict config: configuration dict
    :param str vm_size: vm size
    :rtype: bool
    :return: if pool requires lis install
    """
    # LIS is linux only
    if is_windows_pool(config):
        return False
    if util.is_none_or_empty(vm_size):
        vm_size = _pool_vm_size(config)
    # currently lis is only required for GPU pool setup for certain distros
    if is_gpu_pool(vm_size):
        publisher = pool_publisher(config, lower=True)
        offer = pool_offer(config, lower=True)
        sku = pool_sku(config, lower=True)
        if publisher == 'openlogic' and offer == 'centos' and sku > '7.3':
            return True
    return False


def is_native_docker_pool(config, vm_config=None):
    # type: (dict, any) -> bool
    """Check if vm configuration has native docker support
    :param dict config: configuration dict
    :param any vm_config: vm configuration
    :rtype: bool
    :return: if vm configuration has native docker support
    """
    if vm_config is None:
        vm_config = _populate_pool_vm_configuration(config)
    return vm_config.native


def is_windows_pool(config, vm_config=None):
    # type: (dict, any) -> bool
    """Check if pool is Windows
    :param dict config: configuration dict
    :param any vm_config: vm configuration
    :rtype: bool
    :return: pool is Windows
    """
    if vm_config is None:
        vm_config = _populate_pool_vm_configuration(config)
    if is_platform_image(config, vm_config=vm_config):
        return vm_config.publisher == 'microsoftwindowsserver'
    else:
        return vm_config.node_agent.startswith('batch.node.windows')


def is_sriov_rdma_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is SRIOV IB/RDMA capable
    :param str vm_size: vm size
    :rtype: bool
    :return: if sriov rdma is present
    """
    return (
        _SRIOV_RDMA_INSTANCES.match(vm_size) is not None or
        _SRIOV_RDMA_TRANSITION_INSTANCES.match(vm_size) is not None
    )


def is_networkdirect_rdma_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is NetworkDirect IB/RDMA capable
    :param str vm_size: vm size
    :rtype: bool
    :return: if network direct rdma is present
    """
    return (
        _NETWORKDIRECT_RDMA_INSTANCES.match(vm_size) is not None and
        _SRIOV_RDMA_TRANSITION_INSTANCES.match(vm_size) is None
    )


def is_rdma_pool(vm_size):
    # type: (str) -> bool
    """Check if pool is IB/RDMA capable
    :param str vm_size: vm size
    :rtype: bool
    :return: if rdma is present
    """
    return is_sriov_rdma_pool(vm_size) or is_networkdirect_rdma_pool(vm_size)


def get_ib_class_from_vm_size(vm_size):
    # type: (str) -> str
    """Get IB class from VM size
    :param str vm_size: vm size
    :rtype: str
    :return: IB class
    """
    for c in _VM_IB_CLASS:
        if _VM_IB_CLASS[c].match(vm_size):
            return c
    raise RuntimeError('vm_size {} has no mapping to IB class'.format(
        vm_size))


def is_premium_storage_vm_size(vm_size):
    # type: (str) -> bool
    """Check if vm size is premium storage compatible
    :param str vm_size: vm size
    :rtype: bool
    :return: if vm size is premium storage compatible
    """
    return _PREMIUM_STORAGE_INSTANCES.match(vm_size) is not None


def is_nested_virtualization_capable(vm_size):
    # type: (str) -> bool
    """Check if vm size is nested virtualization capable
    :param str vm_size: vm size
    :rtype: bool
    :return: if vm size is nested virtualization capable
    """
    return _NESTED_VIRTUALIZATION_INSTANCES.match(vm_size) is not None


def is_platform_image(config, vm_config=None):
    # type: (dict, PoolVmConfiguration) -> bool
    """If pool is on a platform image
    :param dict config: configuration object
    :param vm_config: vm configuration
    :rtype: bool
    :return: if on platform image
    """
    if vm_config is None:
        vm_config = _populate_pool_vm_configuration(config)
    return isinstance(vm_config, PoolVmPlatformImageSettings)


def temp_disk_mountpoint(config, offer=None):
    # type: (dict) -> str
    """Get temporary disk mountpoint
    :param dict config: configuration object
    :param str offer: offer override
    :rtype: str
    :return: temporary disk mount point
    """
    if offer is None:
        vmconfig = _populate_pool_vm_configuration(config)
        if is_platform_image(config, vm_config=vmconfig):
            offer = pool_offer(config, lower=True)
        else:
            if vmconfig.node_agent.lower().startswith('batch.node.ubuntu'):
                offer = 'ubuntu'
            elif vmconfig.node_agent.lower().startswith('batch.node.windows'):
                offer = 'windowsserver'
            else:
                offer = '!ubuntu'
    else:
        offer = offer.lower()
    if offer.startswith('ubuntu'):
        return '/mnt'
    elif offer.startswith('windows'):
        return 'D:\\batch'
    else:
        return '/mnt/resource'


def verbose(config):
    # type: (dict) -> bool
    """Get verbose setting
    :param dict config: configuration object
    :rtype: bool
    :return: verbose setting
    """
    return config['_verbose']


def raw(config):
    # type: (dict) -> bool
    """Get raw setting
    :param dict config: configuration object
    :rtype: bool
    :return: raw setting
    """
    return config['_raw']


def get_auto_confirm(config):
    # type: (dict) -> bool
    """Get autoconfirm setting
    :param dict config: configuration object
    :rtype: bool
    :return: auto confirm setting
    """
    return config['_auto_confirm']


def set_auto_confirm(config, flag):
    # type: (dict, bool) -> None
    """Set autoconfirm setting
    :param dict config: configuration object
    :param bool flag: flag to set
    """
    config['_auto_confirm'] = flag


# POOL CONFIG
def pool_specification(config):
    # type: (dict) -> dict
    """Get Pool specification config block
    :param dict config: configuration object
    :rtype: dict
    :return: pool specification
    """
    return config['pool_specification']


def _pool_vm_count(config, conf=None):
    # type: (dict, dict) -> PoolVmCountSettings
    """Get Pool vm count settings
    :param dict config: configuration object
    :param dict conf: vm_count object
    :rtype: PoolVmCountSettings
    :return: pool vm count settings
    """
    if conf is None:
        conf = pool_specification(config)['vm_count']
    return PoolVmCountSettings(
        dedicated=_kv_read(conf, 'dedicated', 0),
        low_priority=_kv_read(conf, 'low_priority', 0),
    )


def _pool_vm_size(config):
    # type: (dict) -> str
    """Get Pool VM size
    :param dict config: configuration object
    :rtype: str
    :return: Pool VM Size
    """
    return config['pool_specification']['vm_size'].lower()


def pool_vm_configuration(config, key):
    # type: (dict, str) -> dict
    """Get Pool VM configuration
    :param dict config: configuration object
    :param str key: vm config key
    :rtype: dict
    :return: pool vm config
    """
    try:
        conf = _kv_read_checked(
            config['pool_specification']['vm_configuration'], key)
    except KeyError:
        conf = None
    if conf is None:
        return config['pool_specification']
    else:
        return conf


def _populate_pool_vm_configuration(config):
    # type: (dict) -> dict
    """Populate Pool VM configuration
    :param dict config: configuration object
    :rtype: PoolVmPlatformImageSettings or PoolVmCustomImageSettings
    :return: pool vm config
    """
    conf = pool_vm_configuration(config, 'platform_image')
    if 'publisher' in conf:
        publisher = conf['publisher'].lower()
        offer = conf['offer'].lower()
        sku = str(conf['sku']).lower()
        # auto convert windows native if detected
        if publisher == 'microsoftwindowsserver':
            vm_config = PoolVmPlatformImageSettings(
                publisher=publisher,
                offer=offer,
                sku=sku,
                version=_kv_read_checked(conf, 'version', default='latest'),
                native=True,
                license_type=_kv_read_checked(conf, 'license_type'),
            )
        elif publisher == 'microsoft-azure-batch':
            # auto convert linux native if detected
            vm_config = PoolVmPlatformImageSettings(
                publisher=publisher,
                offer=offer,
                sku=sku,
                version=_kv_read_checked(conf, 'version', default='latest'),
                native=True,
                license_type=None,
            )
        else:
            vm_config = PoolVmPlatformImageSettings(
                publisher=publisher,
                offer=offer,
                sku=sku,
                version=_kv_read_checked(conf, 'version', default='latest'),
                native=False,
                license_type=None,
            )
        # auto convert vm config to native if specified
        if not vm_config.native and _kv_read(conf, 'native', default=False):
            vm_size = _pool_vm_size(config)
            if (vm_config.publisher == 'canonical' and
                    vm_config.offer == 'ubuntuserver' and
                    vm_config.sku == '16.04-lts'):
                vm_config = PoolVmPlatformImageSettings(
                    publisher='microsoft-azure-batch',
                    offer='ubuntu-server-container{}'.format(
                        '-rdma' if is_rdma_pool(vm_size) else ''),
                    sku=vm_config.sku.replace('.', '-'),
                    version='latest',
                    native=True,
                    license_type=None,
                )
            elif (vm_config.publisher == 'openlogic' and
                    vm_config.offer.startswith('centos') and
                    (vm_config.sku == '7.4' or vm_config.sku == '7.5' or
                     vm_config.sku == '7.6' or vm_config.sku == '7.7')):
                vm_config = PoolVmPlatformImageSettings(
                    publisher='microsoft-azure-batch',
                    offer='centos-container{}'.format(
                        '-rdma' if is_rdma_pool(vm_size) else ''),
                    sku=vm_config.sku.replace('.', '-'),
                    version='latest',
                    native=True,
                    license_type=None,
                )
        return vm_config
    else:
        conf = pool_vm_configuration(config, 'custom_image')
        node_agent = conf['node_agent'].lower()
        if node_agent == 'batch.node.windows amd64':
            native = True
            license_type = _kv_read_checked(conf, 'license_type')
        else:
            native = _kv_read(conf, 'native', default=False)
            license_type = None
        return PoolVmCustomImageSettings(
            arm_image_id=_kv_read_checked(conf, 'arm_image_id'),
            node_agent=node_agent,
            native=native,
            license_type=license_type,
        )
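

# Note on the native auto-conversion in _populate_pool_vm_configuration
# above, as an illustration only: a platform_image of
# canonical/ubuntuserver/16.04-lts with native enabled resolves to a
# microsoft-azure-batch/ubuntu-server-container image (or the
# ubuntu-server-container-rdma variant for RDMA-capable vm sizes) with
# sku '16-04-lts'.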


def pool_autoscale_settings(config):
    # type: (dict) -> PoolAutoscaleSettings
    """Get Pool autoscale settings
    :param dict config: configuration object
    :rtype: PoolAutoscaleSettings
    :return: pool autoscale settings from specification
    """
    conf = pool_specification(config)
    conf = _kv_read_checked(conf, 'autoscale', {})
    ei = _kv_read_checked(conf, 'evaluation_interval')
    if util.is_not_empty(ei):
        ei = util.convert_string_to_timedelta(ei)
    else:
        ei = datetime.timedelta(minutes=15)
    scenconf = _kv_read_checked(conf, 'scenario')
    if scenconf is not None:
        mvc = _kv_read_checked(scenconf, 'maximum_vm_count')
        if mvc is None:
            raise ValueError('maximum_vm_count must be specified')
        mvipe = _kv_read_checked(
            scenconf, 'maximum_vm_increment_per_evaluation', default={})
        ndo = _kv_read_checked(
            scenconf, 'node_deallocation_option', 'taskcompletion')
        if (ndo is not None and
                ndo not in (
                    'requeue', 'terminate', 'taskcompletion', 'retaineddata')):
            raise ValueError(
                'invalid node_deallocation_option: {}'.format(ndo))
        sli = _kv_read_checked(scenconf, 'sample_lookback_interval')
        if util.is_not_empty(sli):
            sli = util.convert_string_to_timedelta(sli)
        else:
            sli = datetime.timedelta(minutes=10)
        tr = _kv_read_checked(scenconf, 'time_ranges', default={})
        trweekday = _kv_read_checked(tr, 'weekdays', default={})
        trworkhour = _kv_read_checked(tr, 'work_hours', default={})
        scenario = PoolAutoscaleScenarioSettings(
            name=_kv_read_checked(scenconf, 'name').lower(),
            maximum_vm_count=_pool_vm_count(config, conf=mvc),
            maximum_vm_increment_per_evaluation=_pool_vm_count(
                config, conf=mvipe),
            node_deallocation_option=ndo,
            sample_lookback_interval=sli,
            required_sample_percentage=_kv_read(
                scenconf, 'required_sample_percentage', 70),
            rebalance_preemption_percentage=_kv_read(
                scenconf, 'rebalance_preemption_percentage', None),
            bias_last_sample=_kv_read(
                scenconf, 'bias_last_sample', True),
            bias_node_type=_kv_read_checked(
                scenconf, 'bias_node_type', 'auto').lower(),
            weekday_start=_kv_read(trweekday, 'start', default=1),
            weekday_end=_kv_read(trweekday, 'end', default=5),
            workhour_start=_kv_read(trworkhour, 'start', default=8),
            workhour_end=_kv_read(trworkhour, 'end', default=17),
        )
    else:
        scenario = None
    return PoolAutoscaleSettings(
        evaluation_interval=ei,
        formula=_kv_read_checked(conf, 'formula'),
        scenario=scenario,
    )
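

# Illustrative shape of the pool_specification autoscale block consumed by
# pool_autoscale_settings above (keys mirror the reads in the function;
# values and duration formats are sample placeholders only):
#   "autoscale": {
#       "evaluation_interval": "00:05:00",
#       "scenario": {
#           "name": "<scenario name>",
#           "maximum_vm_count": {"dedicated": 16, "low_priority": 8},
#           "node_deallocation_option": "taskcompletion",
#           "sample_lookback_interval": "00:10:00",
#           "required_sample_percentage": 70
#       }
#   }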


def is_pool_autoscale_enabled(config, pas=None):
    # type: (dict, PoolAutoscaleSettings) -> bool
    """Check if pool autoscale is enabled
    :param dict config: configuration object
    :param PoolAutoscaleSettings pas: pool autoscale settings
    :rtype: bool
    :return: if pool autoscale is enabled
    """
    if pas is None:
        pas = pool_autoscale_settings(config)
    return util.is_not_empty(pas.formula) or pas.scenario is not None


def prometheus_settings(config):
    # type: (dict) -> PrometheusSettings
    """Get Prometheus Settings
    :param dict config: configuration object
    :rtype: PrometheusSettings
    :return: Prometheus settings from specification
    """
    conf = _kv_read_checked(config, 'prometheus', default={})
    ne_conf = _kv_read_checked(conf, 'node_exporter', default={})
    ca_conf = _kv_read_checked(conf, 'cadvisor', default={})
    ca_options = _kv_read_checked(ca_conf, 'options')
    # do not allow docker_only, which interferes with metric gathering
    if util.is_not_empty(ca_options) and '-docker_only' in ca_options:
        ca_options.remove('-docker_only')
    return PrometheusSettings(
        ne_enabled=_kv_read(ne_conf, 'enabled', default=False),
        ne_port=_kv_read(ne_conf, 'port', default=9100),
        ne_options=_kv_read_checked(ne_conf, 'options'),
        ca_enabled=_kv_read(ca_conf, 'enabled', default=False),
        ca_port=_kv_read(ca_conf, 'port', default=8080),
        ca_options=ca_options,
    )
|
|
|
|
|
|
|
|
|
2016-11-12 02:11:13 +03:00
|
|
|
def pool_settings(config):
    # type: (dict) -> PoolSettings
    """Get Pool settings
    :param dict config: configuration object
    :rtype: PoolSettings
    :return: pool settings from specification
    """
    conf = pool_specification(config)
    max_tasks_per_node = _kv_read(conf, 'max_tasks_per_node', default=1)
    resize_timeout = _kv_read_checked(conf, 'resize_timeout')
    if util.is_not_empty(resize_timeout):
        resize_timeout = util.convert_string_to_timedelta(resize_timeout)
    else:
        resize_timeout = None
    inter_node_communication_enabled = _kv_read(
        conf, 'inter_node_communication_enabled', default=False)
    reboot_on_start_task_failed = _kv_read(
        conf, 'reboot_on_start_task_failed', default=False)
    attempt_recovery_on_unusable = _kv_read(
        conf, 'attempt_recovery_on_unusable', default=False)
    block_until_all_gr = _kv_read(
        conf, 'block_until_all_global_resources_loaded', default=True)
    transfer_files_on_pool_creation = _kv_read(
        conf, 'transfer_files_on_pool_creation', default=False)
    try:
        input_data = conf['input_data']
        if util.is_none_or_empty(input_data):
            raise KeyError()
    except KeyError:
        input_data = None
    # get additional resource files
    try:
        rfs = conf['resource_files']
        if util.is_none_or_empty(rfs):
            raise KeyError()
        resource_files = []
        for rf in rfs:
            try:
                fm = rf['file_mode']
                if util.is_none_or_empty(fm):
                    raise KeyError()
            except KeyError:
                fm = None
            resource_files.append(
                ResourceFileSettings(
                    file_path=rf['file_path'],
                    blob_source=rf['blob_source'],
                    file_mode=fm,
                )
            )
    except KeyError:
        resource_files = None
    # ssh settings
    try:
        sshconf = conf['ssh']
        ssh_username = _kv_read_checked(sshconf, 'username')
        if util.is_none_or_empty(ssh_username):
            raise KeyError()
    except KeyError:
        ssh_username = None
        ssh_expiry_days = None
        ssh_public_key = None
        ssh_public_key_data = None
        ssh_private_key = None
        ssh_gen_docker_tunnel = None
        ssh_gen_file_path = '.'
        ssh_hpn = None
        ssh_ada = None
    else:
        ssh_expiry_days = _kv_read(sshconf, 'expiry_days', 30)
        if ssh_expiry_days <= 0:
            ssh_expiry_days = 30
        ssh_public_key = _kv_read_checked(sshconf, 'ssh_public_key')
        if util.is_not_empty(ssh_public_key):
            ssh_public_key = pathlib.Path(ssh_public_key)
        ssh_public_key_data = _kv_read_checked(sshconf, 'ssh_public_key_data')
        ssh_private_key = _kv_read_checked(sshconf, 'ssh_private_key')
        if util.is_not_empty(ssh_private_key):
            ssh_private_key = pathlib.Path(ssh_private_key)
        if (ssh_public_key is not None and
                util.is_not_empty(ssh_public_key_data)):
            raise ValueError(
                'cannot specify both an SSH public key file and data')
        if (ssh_public_key is None and
                util.is_none_or_empty(ssh_public_key_data) and
                ssh_private_key is not None):
            raise ValueError(
                'cannot specify an SSH private key with no public '
                'key specified')
        ssh_gen_docker_tunnel = _kv_read(
            sshconf, 'generate_docker_tunnel_script', False)
        ssh_gen_file_path = _kv_read_checked(
            sshconf, 'generated_file_export_path', '.')
        ssh_hpn = _kv_read(sshconf, 'hpn_server_swap', False)
        ssh_ada = _kv_read(sshconf, 'allow_docker_access', False)
    # rdp settings
    try:
        rdpconf = conf['rdp']
        rdp_username = _kv_read_checked(rdpconf, 'username')
        if util.is_none_or_empty(rdp_username):
            raise KeyError()
    except KeyError:
        rdp_username = None
        rdp_expiry_days = None
        rdp_password = None
    else:
        rdp_expiry_days = _kv_read(rdpconf, 'expiry_days', 30)
        if rdp_expiry_days <= 0:
            rdp_expiry_days = 30
        rdp_password = _kv_read_checked(rdpconf, 'password')
    # remote access control
    rac = _kv_read_checked(conf, 'remote_access_control', default={})
    rac = RemoteAccessControl(
        starting_port=_kv_read(rac, 'starting_port', default=49000),
        backend_port='22' if ssh_username is not None else '3389',
        protocol='tcp',
        allow=_kv_read_checked(rac, 'allow'),
        deny=_kv_read_checked(rac, 'deny'),
    )
    if (rac.starting_port < 1 or
            (rac.starting_port > 49000 and rac.starting_port <= 55000) or
            rac.starting_port > 64536):
        raise ValueError('starting_port is invalid or in a reserved range')
    # gpu driver
    try:
        gpu_driver = _kv_read_checked(conf['gpu']['nvidia_driver'], 'source')
    except KeyError:
        gpu_driver = None
    # additional node prep
    addl_np = _kv_read_checked(
        conf, 'additional_node_prep', default={})
    addl_np_cmds = _kv_read_checked(addl_np, 'commands', default={})
    additional_node_prep = AdditionalNodePrepSettings(
        commands_pre=_kv_read_checked(addl_np_cmds, 'pre', default=[]),
        commands_post=_kv_read_checked(addl_np_cmds, 'post', default=[]),
        environment_variables=_kv_read_checked(
            addl_np, 'environment_variables', default={}),
        environment_variables_keyvault_secret_id=_kv_read_checked(
            addl_np, 'environment_variables_keyvault_secret_id'),
    )
    del addl_np_cmds
    del addl_np
    # certificates
    certdict = _kv_read_checked(conf, 'certificates', default={})
    certs = []
    for tp in certdict:
        visibility = []
        for vis in certdict[tp]['visibility']:
            if vis == 'remote_user':
                visibility.append(
                    batchmodels.CertificateVisibility.remote_user)
            elif vis == 'start_task':
                visibility.append(batchmodels.CertificateVisibility.start_task)
            elif vis == 'task':
                visibility.append(batchmodels.CertificateVisibility.task)
        certs.append(batchmodels.CertificateReference(
            thumbprint=tp, thumbprint_algorithm='sha1',
            visibility=visibility
        ))
    # container runtimes
    try:
        cr_install = _kv_read_checked(
            conf['container_runtimes'], 'install', default=[])
    except KeyError:
        cr_install = []
    try:
        cr_default = _kv_read_checked(
            conf['container_runtimes'], 'default', default='runc')
    except KeyError:
        cr_default = 'runc'
    return PoolSettings(
        id=conf['id'],
        vm_size=_pool_vm_size(config),
        vm_count=_pool_vm_count(config),
        resize_timeout=resize_timeout,
        max_tasks_per_node=max_tasks_per_node,
        inter_node_communication_enabled=inter_node_communication_enabled,
        vm_configuration=_populate_pool_vm_configuration(config),
        reboot_on_start_task_failed=reboot_on_start_task_failed,
        attempt_recovery_on_unusable=attempt_recovery_on_unusable,
        block_until_all_global_resources_loaded=block_until_all_gr,
        transfer_files_on_pool_creation=transfer_files_on_pool_creation,
        input_data=input_data,
        resource_files=resource_files,
        ssh=SSHSettings(
            username=ssh_username,
            expiry_days=ssh_expiry_days,
            ssh_public_key=ssh_public_key,
            ssh_public_key_data=ssh_public_key_data,
            ssh_private_key=ssh_private_key,
            generate_docker_tunnel_script=ssh_gen_docker_tunnel,
            generated_file_export_path=ssh_gen_file_path,
            hpn_server_swap=ssh_hpn,
            allow_docker_access=ssh_ada,
        ),
        rdp=RDPSettings(
            username=rdp_username,
            expiry_days=rdp_expiry_days,
            password=rdp_password,
        ),
        gpu_driver=gpu_driver,
        additional_node_prep=additional_node_prep,
        virtual_network=virtual_network_settings(
            conf,
            default_existing_ok=True,
            default_create_nonexistant=False,
        ),
        autoscale=pool_autoscale_settings(config),
        node_fill_type=_kv_read_checked(conf, 'node_fill_type'),
        remote_access_control=rac,
        certificates=certs,
        prometheus=prometheus_settings(conf),
        upload_diagnostics_logs_on_unusable=_kv_read(
            conf, 'upload_diagnostics_logs_on_unusable', default=True),
        container_runtimes_install=cr_install,
        container_runtimes_default=cr_default,
        per_job_auto_scratch=_kv_read(
            conf, 'per_job_auto_scratch', default=False),
        batch_insights_enabled=_kv_read(
            conf, 'batch_insights_enabled', default=False),
        public_ips=_kv_read_checked(conf, 'public_ips'),
    )


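# Configuration sketch (illustrative only, not part of the original module):
# the optional 'remote_access_control' block read by pool_settings() above.
# Only starting_port, allow and deny are taken from the block; the backend
# port is selected automatically (22 when an SSH username is configured,
# 3389 for RDP) and the protocol is fixed to tcp. The values shown here are
# hypothetical.
#
#   remote_access_control:
#     starting_port: 49000
#     allow:
#       - 203.0.113.0/24

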
def set_attempt_recovery_on_unusable(config, flag):
    # type: (dict, bool) -> None
    """Set attempt recovery on unusable setting
    :param dict config: configuration object
    :param bool flag: flag to set
    """
    config['pool_specification']['attempt_recovery_on_unusable'] = flag


def set_block_until_all_global_resources_loaded(config, flag):
    # type: (dict, bool) -> None
    """Set block until all global resources setting
    :param dict config: configuration object
    :param bool flag: flag to set
    """
    config['pool_specification'][
        'block_until_all_global_resources_loaded'] = flag


def set_inter_node_communication_enabled(config, flag):
    # type: (dict, bool) -> None
    """Set inter node comm setting
    :param dict config: configuration object
    :param bool flag: flag to set
    """
    config['pool_specification']['inter_node_communication_enabled'] = flag


def set_ssh_public_key(config, pubkey):
    # type: (dict, str) -> None
    """Set SSH public key setting
    :param dict config: configuration object
    :param str pubkey: public key to set
    """
    if 'ssh' not in config['pool_specification']:
        config['pool_specification']['ssh'] = {}
    config['pool_specification']['ssh']['ssh_public_key'] = pubkey


def set_hpn_server_swap(config, flag):
    # type: (dict, bool) -> None
    """Set SSH HPN server swap setting
    :param dict config: configuration object
    :param bool flag: flag to set
    """
    if 'ssh' not in config['pool_specification']:
        config['pool_specification']['ssh'] = {}
    config['pool_specification']['ssh']['hpn_server_swap'] = flag


def pool_id(config, lower=False):
    # type: (dict, bool) -> str
    """Get Pool id
    :param dict config: configuration object
    :param bool lower: lowercase return
    :rtype: str
    :return: pool id
    """
    id = config['pool_specification']['id']
    return id.lower() if lower else id


def pool_publisher(config, lower=False):
    # type: (dict, bool) -> str
    """Get Pool publisher
    :param dict config: configuration object
    :param bool lower: lowercase return
    :rtype: str
    :return: pool publisher
    """
    conf = pool_vm_configuration(config, 'platform_image')
    pub = _kv_read_checked(conf, 'publisher')
    return pub.lower() if lower and util.is_not_empty(pub) else pub


def pool_offer(config, lower=False):
    # type: (dict, bool) -> str
    """Get Pool offer
    :param dict config: configuration object
    :param bool lower: lowercase return
    :rtype: str
    :return: pool offer
    """
    conf = pool_vm_configuration(config, 'platform_image')
    offer = _kv_read_checked(conf, 'offer')
    return offer.lower() if lower and util.is_not_empty(offer) else offer


def pool_sku(config, lower=False):
    # type: (dict, bool) -> str
    """Get Pool sku
    :param dict config: configuration object
    :param bool lower: lowercase return
    :rtype: str
    :return: pool sku
    """
    conf = pool_vm_configuration(config, 'platform_image')
    try:
        sku = str(conf['sku'])
        if util.is_none_or_empty(sku):
            raise KeyError()
    except (KeyError, TypeError):
        sku = None
    return sku.lower() if lower and util.is_not_empty(sku) else sku


def pool_custom_image_node_agent(config):
    # type: (dict) -> str
    """Get Pool node agent from custom image
    :param dict config: configuration object
    :rtype: str
    :return: pool node agent
    """
    conf = pool_vm_configuration(config, 'custom_image')
    return _kv_read_checked(conf, 'node_agent')


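# Configuration sketch (illustrative only, not part of the original module):
# the 'vm_configuration':'platform_image' block that pool_publisher(),
# pool_offer() and pool_sku() read via pool_vm_configuration(). The image
# reference values shown below are hypothetical.
#
#   vm_configuration:
#     platform_image:
#       publisher: Canonical
#       offer: UbuntuServer
#       sku: 18.04-LTS

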
# CREDENTIALS SETTINGS
def raw_credentials(config, omit_keyvault):
    # type: (dict, bool) -> dict
    """Get raw credentials dictionary
    :param dict config: configuration object
    :param bool omit_keyvault: omit keyvault settings if present
    :rtype: dict
    :return: credentials dict
    """
    conf = config['credentials']
    if omit_keyvault:
        conf.pop('keyvault', None)
    return conf


def determine_cloud_type_from_aad(config):
    # type: (dict) -> str
    """Determine cloud type from aad settings
    :param dict config: configuration object
    :rtype: str
    :return: cloud type string
    """
    auth_url = credentials_management(
        config).aad.authority_url.rstrip('/').lower()
    if auth_url.endswith('.com'):
        cloud_type = 'public'
    elif auth_url.endswith('.cn'):
        cloud_type = 'china'
    elif auth_url.endswith('.de'):
        cloud_type = 'germany'
    elif auth_url.endswith('.us'):
        cloud_type = 'usgov'
    else:
        raise ValueError('unknown sovereign cloud authority url: {}'.format(
            auth_url))
    return cloud_type


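# Mapping sketch (illustrative only, not part of the original module): the
# cloud type returned by determine_cloud_type_from_aad() is derived solely
# from the suffix of the management AAD authority_url, for example:
#
#   https://login.microsoftonline.com  -> 'public'
#   https://login.chinacloudapi.cn     -> 'china'
#   https://login.microsoftonline.de   -> 'germany'
#   https://login.microsoftonline.us   -> 'usgov'

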
def _aad_credentials(
|
2017-09-29 04:39:15 +03:00
|
|
|
conf, service, default_endpoint=None, default_token_cache_file=None):
|
2017-03-07 20:01:10 +03:00
|
|
|
# type: (dict, str) -> AADSettings
|
|
|
|
"""Retrieve AAD Settings
|
2017-09-29 04:39:15 +03:00
|
|
|
:param dict conf: credentials configuration object
|
|
|
|
:param str service: credentials section service name
|
2017-03-07 20:01:10 +03:00
|
|
|
:param str default_endpoint: default endpoint
|
2017-03-09 10:43:16 +03:00
|
|
|
:param str default_token_cache_file: default token cache file
|
2017-03-07 20:01:10 +03:00
|
|
|
:rtype: AADSettings
|
|
|
|
:return: AAD settings
|
|
|
|
"""
|
2017-09-29 04:39:15 +03:00
|
|
|
super_aad = _kv_read_checked(conf, 'aad', default={})
|
|
|
|
if service in conf:
|
|
|
|
service_aad = _kv_read_checked(conf[service], 'aad', default={})
|
|
|
|
else:
|
|
|
|
service_aad = {}
|
|
|
|
if util.is_not_empty(super_aad) or util.is_not_empty(service_aad):
|
|
|
|
aad_directory_id = (
|
|
|
|
_kv_read_checked(service_aad, 'directory_id') or
|
|
|
|
_kv_read_checked(super_aad, 'directory_id')
|
|
|
|
)
|
|
|
|
aad_application_id = (
|
|
|
|
_kv_read_checked(service_aad, 'application_id') or
|
|
|
|
_kv_read_checked(super_aad, 'application_id')
|
|
|
|
)
|
|
|
|
aad_auth_key = (
|
|
|
|
_kv_read_checked(service_aad, 'auth_key') or
|
|
|
|
_kv_read_checked(super_aad, 'auth_key')
|
|
|
|
)
|
|
|
|
aad_user = (
|
|
|
|
_kv_read_checked(service_aad, 'user') or
|
|
|
|
_kv_read_checked(super_aad, 'user')
|
|
|
|
)
|
|
|
|
aad_password = (
|
|
|
|
_kv_read_checked(service_aad, 'password') or
|
|
|
|
_kv_read_checked(super_aad, 'password')
|
|
|
|
)
|
|
|
|
aad_cert_private_key = (
|
|
|
|
_kv_read_checked(service_aad, 'rsa_private_key_pem') or
|
|
|
|
_kv_read_checked(super_aad, 'rsa_private_key_pem')
|
|
|
|
)
|
|
|
|
aad_cert_thumbprint = (
|
|
|
|
_kv_read_checked(service_aad, 'x509_cert_sha1_thumbprint') or
|
|
|
|
_kv_read_checked(super_aad, 'x509_cert_sha1_thumbprint')
|
|
|
|
)
|
2018-02-16 07:37:49 +03:00
|
|
|
aad_authority_url = (
|
|
|
|
_kv_read_checked(service_aad, 'authority_url') or
|
2018-05-01 01:06:52 +03:00
|
|
|
_kv_read_checked(
|
|
|
|
super_aad, 'authority_url',
|
|
|
|
default='https://login.microsoftonline.com')
|
2018-02-16 07:37:49 +03:00
|
|
|
)
|
2017-03-07 20:01:10 +03:00
|
|
|
aad_endpoint = _kv_read_checked(
|
2017-09-29 04:39:15 +03:00
|
|
|
service_aad, 'endpoint', default=default_endpoint)
|
|
|
|
token_cache = _kv_read_checked(service_aad, 'token_cache', default={})
|
2018-06-20 23:34:38 +03:00
|
|
|
if _kv_read(token_cache, 'enabled', default=True):
|
2017-03-07 20:01:10 +03:00
|
|
|
token_cache_file = _kv_read_checked(
|
2017-09-29 04:39:15 +03:00
|
|
|
token_cache, 'filename', default=default_token_cache_file)
|
2017-03-07 20:01:10 +03:00
|
|
|
else:
|
|
|
|
token_cache_file = None
|
|
|
|
return AADSettings(
|
|
|
|
directory_id=aad_directory_id,
|
|
|
|
application_id=aad_application_id,
|
|
|
|
auth_key=aad_auth_key,
|
|
|
|
user=aad_user,
|
|
|
|
password=aad_password,
|
|
|
|
rsa_private_key_pem=aad_cert_private_key,
|
|
|
|
x509_cert_sha1_thumbprint=aad_cert_thumbprint,
|
|
|
|
endpoint=aad_endpoint,
|
|
|
|
token_cache_file=token_cache_file,
|
2018-02-16 07:37:49 +03:00
|
|
|
authority_url=aad_authority_url,
|
2017-03-07 20:01:10 +03:00
|
|
|
)
|
|
|
|
else:
|
|
|
|
return AADSettings(
|
|
|
|
directory_id=None,
|
|
|
|
application_id=None,
|
|
|
|
auth_key=None,
|
|
|
|
user=None,
|
|
|
|
password=None,
|
|
|
|
rsa_private_key_pem=None,
|
|
|
|
x509_cert_sha1_thumbprint=None,
|
|
|
|
endpoint=default_endpoint,
|
|
|
|
token_cache_file=None,
|
2018-02-16 07:37:49 +03:00
|
|
|
authority_url=None,
|
2017-03-07 20:01:10 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-01-10 22:21:11 +03:00
|
|
|
def credentials_keyvault(config):
|
2017-03-03 07:17:35 +03:00
|
|
|
# type: (dict) -> KeyVaultCredentialsSettings
|
2017-01-10 22:21:11 +03:00
|
|
|
"""Get KeyVault settings
|
|
|
|
:param dict config: configuration object
|
2017-03-03 07:17:35 +03:00
|
|
|
:rtype: KeyVaultCredentialsSettings
|
2017-01-10 22:21:11 +03:00
|
|
|
:return: Key Vault settings
|
|
|
|
"""
|
|
|
|
try:
|
2018-05-17 19:35:58 +03:00
|
|
|
creds = config['credentials']
|
2017-01-10 22:21:11 +03:00
|
|
|
except (KeyError, TypeError):
|
2018-05-17 19:35:58 +03:00
|
|
|
creds = {}
|
|
|
|
conf = _kv_read_checked(creds, 'keyvault', default={})
|
2017-03-03 07:17:35 +03:00
|
|
|
return KeyVaultCredentialsSettings(
|
2017-03-09 10:43:16 +03:00
|
|
|
aad=_aad_credentials(
|
2018-05-17 19:35:58 +03:00
|
|
|
creds,
|
2017-09-29 04:39:15 +03:00
|
|
|
'keyvault',
|
2017-03-09 10:43:16 +03:00
|
|
|
default_endpoint='https://vault.azure.net',
|
|
|
|
default_token_cache_file=(
|
|
|
|
'.batch_shipyard_aad_keyvault_token.json'
|
|
|
|
),
|
|
|
|
),
|
2017-09-29 04:39:15 +03:00
|
|
|
keyvault_uri=_kv_read_checked(conf, 'uri'),
|
|
|
|
keyvault_credentials_secret_id=_kv_read_checked(
|
|
|
|
conf, 'credentials_secret_id'),
|
2017-01-10 22:21:11 +03:00
|
|
|
)
|
2017-01-05 21:20:13 +03:00
|
|
|
|
|
|
|
|
2017-03-03 07:17:35 +03:00
|
|
|
def credentials_management(config):
|
|
|
|
# type: (dict) -> ManagementCredentialsSettings
|
|
|
|
"""Get Management settings
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: ManagementCredentialsSettings
|
|
|
|
:return: Management settings
|
|
|
|
"""
|
|
|
|
try:
|
2018-05-17 19:35:58 +03:00
|
|
|
creds = config['credentials']
|
2017-03-03 07:17:35 +03:00
|
|
|
except (KeyError, TypeError):
|
2018-05-17 19:35:58 +03:00
|
|
|
creds = {}
|
|
|
|
conf = _kv_read_checked(creds, 'management', default={})
|
2017-03-03 07:17:35 +03:00
|
|
|
return ManagementCredentialsSettings(
|
2017-03-07 20:01:10 +03:00
|
|
|
aad=_aad_credentials(
|
2018-05-17 19:35:58 +03:00
|
|
|
creds,
|
2017-09-29 04:39:15 +03:00
|
|
|
'management',
|
2018-02-16 07:37:49 +03:00
|
|
|
default_endpoint='https://management.azure.com/',
|
2017-03-09 10:43:16 +03:00
|
|
|
default_token_cache_file=(
|
|
|
|
'.batch_shipyard_aad_management_token.json'
|
|
|
|
),
|
|
|
|
),
|
2017-09-29 04:39:15 +03:00
|
|
|
subscription_id=_kv_read_checked(conf, 'subscription_id'),
|
2017-03-03 07:17:35 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
def parse_batch_service_url(account_service_url, test_cluster=False):
    # type: (str, bool) -> Tuple[str, str]
    """Parse batch service url into account name and location
    :param str account_service_url: account url
    :param bool test_cluster: test cluster
    :rtype: tuple
    :return: account, location
    """
    # parse location from url
    try:
        tmp = account_service_url.split('.')
        location = tmp[1].lower()
    except Exception:
        raise ValueError(
            'The Batch account_service_url is malformed, please check '
            'your configuration')
    # parse account name from url
    if test_cluster:
        account = account_service_url.split('/')[-1]
    else:
        account = tmp[0].split('/')[-1]
    return account, location


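# Usage sketch (illustrative only, not part of the original module): a
# service URL of the form https://<account>.<region>.batch.azure.com is
# split on '.' to recover the account name and region.
#
#     >>> parse_batch_service_url('https://myacct.eastus.batch.azure.com')
#     ('myacct', 'eastus')

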
def credentials_batch(config):
|
|
|
|
# type: (dict) -> BatchCredentialsSettings
|
|
|
|
"""Get Batch credentials
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: BatchCredentialsSettings
|
|
|
|
:return: batch creds
|
|
|
|
"""
|
2018-05-17 19:35:58 +03:00
|
|
|
try:
|
|
|
|
creds = config['credentials']
|
|
|
|
except (KeyError, TypeError):
|
|
|
|
raise ValueError('credentials not specified')
|
2018-06-19 18:10:00 +03:00
|
|
|
# allow conf to not exist due to later keyvault load
|
|
|
|
conf = _kv_read_checked(creds, 'batch', default={})
|
2017-03-08 22:13:09 +03:00
|
|
|
account_key = _kv_read_checked(conf, 'account_key')
|
2017-03-09 01:56:39 +03:00
|
|
|
account_service_url = conf['account_service_url']
|
|
|
|
resource_group = _kv_read_checked(conf, 'resource_group')
|
2018-05-17 19:35:58 +03:00
|
|
|
test_cluster = _kv_read(conf, 'test_cluster', default=False)
|
2017-03-09 01:56:39 +03:00
|
|
|
# get subscription id from management section
|
|
|
|
try:
|
|
|
|
subscription_id = _kv_read_checked(
|
2018-05-17 19:35:58 +03:00
|
|
|
creds['management'], 'subscription_id')
|
2017-03-09 01:56:39 +03:00
|
|
|
except (KeyError, TypeError):
|
|
|
|
subscription_id = None
|
2018-06-25 17:49:45 +03:00
|
|
|
account, location = parse_batch_service_url(
|
|
|
|
account_service_url, test_cluster=test_cluster)
|
2018-05-17 17:30:31 +03:00
|
|
|
aad = _aad_credentials(
|
2018-05-17 19:35:58 +03:00
|
|
|
creds,
|
2018-05-17 17:30:31 +03:00
|
|
|
'batch',
|
|
|
|
default_endpoint='https://batch.core.windows.net/',
|
|
|
|
default_token_cache_file=(
|
|
|
|
'.batch_shipyard_aad_batch_token.json'
|
2017-03-09 10:43:16 +03:00
|
|
|
),
|
2018-05-17 17:30:31 +03:00
|
|
|
)
|
|
|
|
if util.is_not_empty(account_key) and util.is_not_empty(aad.directory_id):
|
|
|
|
raise ValueError(
|
|
|
|
'Both account_key and aad settings specified for batch '
|
|
|
|
'credentials. If using Azure Active Directory, then do not '
|
|
|
|
'specify an account_key.')
|
2019-01-16 18:28:06 +03:00
|
|
|
app_insights = _kv_read_checked(conf, 'application_insights', default={})
|
2018-05-17 17:30:31 +03:00
|
|
|
return BatchCredentialsSettings(
|
|
|
|
aad=aad,
|
2017-03-09 20:40:16 +03:00
|
|
|
account=account,
|
2017-03-08 22:13:09 +03:00
|
|
|
account_key=account_key,
|
|
|
|
account_service_url=conf['account_service_url'],
|
2017-03-09 01:56:39 +03:00
|
|
|
resource_group=resource_group,
|
|
|
|
location=location,
|
|
|
|
subscription_id=subscription_id,
|
2019-01-16 18:28:06 +03:00
|
|
|
app_insights_instrumentation_key=_kv_read_checked(
|
|
|
|
app_insights, 'instrumentation_key'),
|
|
|
|
app_insights_application_id=_kv_read_checked(
|
|
|
|
app_insights, 'application_id'),
|
2016-11-12 08:08:58 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-01-05 21:20:13 +03:00
|
|
|
def credentials_batch_account_key_secret_id(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get Batch account key KeyVault Secret Id
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: keyvault secret id
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
secid = config[
|
|
|
|
'credentials']['batch']['account_key_keyvault_secret_id']
|
|
|
|
if util.is_none_or_empty(secid):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
return None
|
|
|
|
return secid
|
|
|
|
|
|
|
|
|
|
|
|
def set_credentials_batch_account_key(config, bakey):
|
|
|
|
# type: (dict, str) -> None
|
|
|
|
"""Set Batch account key
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param str bakey: batch account key
|
|
|
|
"""
|
|
|
|
config['credentials']['batch']['account_key'] = bakey
|
|
|
|
|
|
|
|
|
2018-04-18 01:01:12 +03:00
|
|
|
def credentials_storage_aad(config):
|
|
|
|
# type: (dict) -> AADSettings
|
|
|
|
"""Get storage AAD credentials
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: AADSettings
|
|
|
|
:return: storage aad settings
|
|
|
|
"""
|
|
|
|
if 'aad' in config['credentials']['storage']:
|
|
|
|
return _aad_credentials(
|
|
|
|
config['credentials'],
|
|
|
|
'storage',
|
|
|
|
default_endpoint='https://management.azure.com/',
|
|
|
|
default_token_cache_file=(
|
|
|
|
'.batch_shipyard_aad_storage_token.json'
|
|
|
|
),
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
return _aad_credentials(
|
|
|
|
config['credentials'],
|
|
|
|
'management',
|
|
|
|
default_endpoint='https://management.azure.com/',
|
|
|
|
default_token_cache_file=(
|
|
|
|
'.batch_shipyard_aad_storage_token.json'
|
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
def credentials_storage(config, ssel):
|
|
|
|
# type: (dict, str) -> StorageCredentialsSettings
|
|
|
|
"""Get specific storage credentials
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param str ssel: storage selector link
|
|
|
|
:rtype: StorageCredentialsSettings
|
|
|
|
:return: storage creds
|
|
|
|
"""
|
2017-11-06 19:12:49 +03:00
|
|
|
try:
|
|
|
|
conf = config['credentials']['storage'][ssel]
|
|
|
|
except KeyError:
|
|
|
|
raise ValueError(
|
|
|
|
('Could not find storage account alias {} in credentials:storage '
|
|
|
|
'configuration. Please ensure the storage account alias '
|
|
|
|
'exists.').format(ssel))
|
2016-11-12 08:08:58 +03:00
|
|
|
return StorageCredentialsSettings(
|
|
|
|
account=conf['account'],
|
2018-04-18 01:01:12 +03:00
|
|
|
account_key=_kv_read_checked(conf, 'account_key'),
|
|
|
|
endpoint=_kv_read_checked(
|
|
|
|
conf, 'endpoint', default='core.windows.net'),
|
|
|
|
resource_group=_kv_read_checked(conf, 'resource_group'),
|
2016-11-12 08:08:58 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-01-05 21:20:13 +03:00
|
|
|
def iterate_storage_credentials(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Iterate storage credential storage select links
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: storage selector link
|
|
|
|
"""
|
|
|
|
for conf in config['credentials']['storage']:
|
2018-04-18 01:01:12 +03:00
|
|
|
if conf == 'aad':
|
|
|
|
continue
|
2017-01-05 21:20:13 +03:00
|
|
|
yield conf
|
|
|
|
|
|
|
|
|
|
|
|
def credentials_storage_account_key_secret_id(config, ssel):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get Storage account key KeyVault Secret Id
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param str ssel: storage selector link
|
|
|
|
:rtype: str
|
|
|
|
:return: keyvault secret id
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
secid = config[
|
|
|
|
'credentials']['storage'][ssel]['account_key_keyvault_secret_id']
|
|
|
|
if util.is_none_or_empty(secid):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
return None
|
|
|
|
return secid
|
|
|
|
|
|
|
|
|
2018-04-18 01:01:12 +03:00
|
|
|
def set_credentials_storage_account(config, ssel, sakey, ep=None):
|
|
|
|
# type: (dict, str, str, str) -> None
|
|
|
|
"""Set Storage account key and endpoint
|
2017-01-05 21:20:13 +03:00
|
|
|
:param dict config: configuration object
|
|
|
|
:param str ssel: storage selector link
|
|
|
|
:param str sakey: storage account key
|
2018-04-18 01:01:12 +03:00
|
|
|
:param str ep: endpoint
|
2017-01-05 21:20:13 +03:00
|
|
|
"""
|
|
|
|
config['credentials']['storage'][ssel]['account_key'] = sakey
|
2018-04-18 01:01:12 +03:00
|
|
|
if util.is_not_empty(ep):
|
|
|
|
config['credentials']['storage'][ssel]['endpoint'] = ep
|
2017-01-05 21:20:13 +03:00
|
|
|
|
|
|
|
|
2016-11-19 19:54:52 +03:00
|
|
|
def docker_registry_login(config, server):
|
|
|
|
# type: (dict, str) -> tuple
|
|
|
|
"""Get docker registry login settings
|
2016-11-12 08:08:58 +03:00
|
|
|
:param dict config: configuration object
|
2016-11-19 19:54:52 +03:00
|
|
|
:param str server: credentials for login server to retrieve
|
2016-11-12 08:08:58 +03:00
|
|
|
:rtype: tuple
|
|
|
|
:return: (user, pw)
|
|
|
|
"""
|
|
|
|
try:
|
2016-11-19 19:54:52 +03:00
|
|
|
user = config['credentials']['docker_registry'][server]['username']
|
|
|
|
pw = config['credentials']['docker_registry'][server]['password']
|
2016-11-12 08:08:58 +03:00
|
|
|
if util.is_none_or_empty(user) or util.is_none_or_empty(pw):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
user = None
|
|
|
|
pw = None
|
|
|
|
return user, pw
|
|
|
|
|
|
|
|
|
2017-10-18 04:51:27 +03:00
|
|
|
def singularity_registry_login(config, server):
|
|
|
|
# type: (dict, str) -> tuple
|
|
|
|
"""Get singularity registry login settings
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param str server: credentials for login server to retrieve
|
|
|
|
:rtype: tuple
|
|
|
|
:return: (user, pw)
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
user = config['credentials']['singularity_registry'][
|
|
|
|
server]['username']
|
|
|
|
pw = config['credentials']['singularity_registry'][
|
|
|
|
server]['password']
|
|
|
|
if util.is_none_or_empty(user) or util.is_none_or_empty(pw):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
user = None
|
|
|
|
pw = None
|
|
|
|
return user, pw
|
|
|
|
|
|
|
|
|
2017-11-08 05:16:30 +03:00
|
|
|
def credentials_iterate_registry_servers(config, is_docker):
|
|
|
|
# type: (dict, bool) -> str
|
|
|
|
"""Iterate registry servers
|
2017-01-05 21:20:13 +03:00
|
|
|
:param dict config: configuration object
|
2017-11-08 05:16:30 +03:00
|
|
|
:param bool is_docker: is a docker registry
|
2017-01-05 21:20:13 +03:00
|
|
|
:rtype: str
|
2017-11-08 05:16:30 +03:00
|
|
|
:return: registry server name
|
2017-01-05 21:20:13 +03:00
|
|
|
"""
|
2017-11-08 05:16:30 +03:00
|
|
|
if is_docker:
|
|
|
|
kind = 'docker_registry'
|
|
|
|
else:
|
|
|
|
kind = 'singularity_registry'
|
2017-01-05 21:20:13 +03:00
|
|
|
try:
|
2017-11-08 05:16:30 +03:00
|
|
|
for conf in config['credentials'][kind]:
|
2017-01-05 21:20:13 +03:00
|
|
|
yield conf
|
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
2017-11-08 05:16:30 +03:00
|
|
|
def credentials_registry_password_secret_id(config, link, is_docker):
|
|
|
|
# type: (dict, str, bool) -> str
|
|
|
|
"""Get registry password KeyVault Secret Id
|
2017-01-05 21:20:13 +03:00
|
|
|
:param dict config: configuration object
|
2017-11-08 05:16:30 +03:00
|
|
|
:param str link: registry link
|
|
|
|
:param bool is_docker: is docker registry
|
2017-01-05 21:20:13 +03:00
|
|
|
:rtype: str
|
|
|
|
:return: keyvault secret id
|
|
|
|
"""
|
2017-11-08 05:16:30 +03:00
|
|
|
if is_docker:
|
|
|
|
kind = 'docker_registry'
|
|
|
|
else:
|
|
|
|
kind = 'singularity_registry'
|
2017-01-05 21:20:13 +03:00
|
|
|
try:
|
2017-11-08 05:16:30 +03:00
|
|
|
secid = config['credentials'][kind][link][
|
|
|
|
'password_keyvault_secret_id']
|
2017-01-05 21:20:13 +03:00
|
|
|
if util.is_none_or_empty(secid):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
return None
|
|
|
|
return secid
|
|
|
|
|
|
|
|
|
2017-11-08 05:16:30 +03:00
|
|
|
def set_credentials_registry_password(config, link, is_docker, password):
|
|
|
|
# type: (dict, str, bool, str) -> None
|
|
|
|
"""Set registry password
|
2017-01-05 21:20:13 +03:00
|
|
|
:param dict config: configuration object
|
2017-11-08 05:16:30 +03:00
|
|
|
:param str link: registry link
|
|
|
|
:param bool is_docker: is docker registry
|
2017-01-05 21:20:13 +03:00
|
|
|
:param str password: password
|
|
|
|
"""
|
2017-11-08 05:16:30 +03:00
|
|
|
if is_docker:
|
|
|
|
kind = 'docker_registry'
|
|
|
|
else:
|
|
|
|
kind = 'singularity_registry'
|
|
|
|
config['credentials'][kind][link]['password'] = password
|
2017-01-05 21:20:13 +03:00
|
|
|
|
|
|
|
|
2019-01-15 20:56:03 +03:00
|
|
|
def credentials_slurm(config):
|
|
|
|
# type: (dict) -> SlurmCredentialsSettings
|
|
|
|
"""Get slurm settings
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: SlurmCredentialsSettings
|
|
|
|
:return: Slurm settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
creds = config['credentials']
|
|
|
|
except (KeyError, TypeError):
|
|
|
|
creds = {}
|
|
|
|
conf = _kv_read_checked(creds, 'slurm', default={})
|
|
|
|
return SlurmCredentialsSettings(
|
|
|
|
db_password=_kv_read_checked(conf, 'db_password'),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
# GLOBAL SETTINGS
|
|
|
|
def batch_shipyard_settings(config):
|
|
|
|
# type: (dict) -> BatchShipyardSettings
|
|
|
|
"""Get batch shipyard settings
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: BatchShipyardSettings
|
|
|
|
:return: batch shipyard settings
|
|
|
|
"""
|
|
|
|
conf = config['batch_shipyard']
|
|
|
|
stlink = conf['storage_account_settings']
|
|
|
|
if util.is_none_or_empty(stlink):
|
|
|
|
raise ValueError('batch_shipyard:storage_account_settings is invalid')
|
|
|
|
try:
|
|
|
|
sep = conf['storage_entity_prefix']
|
2016-11-12 08:51:11 +03:00
|
|
|
if sep is None:
|
2016-11-12 08:08:58 +03:00
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
sep = 'shipyard'
|
|
|
|
return BatchShipyardSettings(
|
|
|
|
storage_account_settings=stlink,
|
|
|
|
storage_entity_prefix=sep,
|
2018-06-26 21:25:53 +03:00
|
|
|
generated_sas_expiry_days=_kv_read(conf, 'generated_sas_expiry_days'),
|
|
|
|
use_shipyard_docker_image=_kv_read(
|
|
|
|
conf, 'use_shipyard_docker_image', default=True),
|
|
|
|
store_timing_metrics=_kv_read(
|
|
|
|
conf, 'store_timing_metrics', default=False),
|
|
|
|
fallback_registry=_kv_read_checked(conf, 'fallback_registry'),
|
2018-07-29 02:09:22 +03:00
|
|
|
delay_docker_image_preload=_kv_read(
|
|
|
|
conf, 'delay_docker_image_preload', default=False),
|
2016-11-12 08:08:58 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2018-07-29 02:09:22 +03:00
|
|
|
def requires_populate_global_resources_storage(config):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Requires populating global resources in storage
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: if populating gr is required
|
|
|
|
"""
|
|
|
|
pool = pool_settings(config)
|
|
|
|
native = is_native_docker_pool(config, vm_config=pool.vm_configuration)
|
|
|
|
if not native:
|
|
|
|
return True
|
|
|
|
bs = batch_shipyard_settings(config)
|
|
|
|
is_windows = is_windows_pool(config, vm_config=pool.vm_configuration)
|
|
|
|
return bs.delay_docker_image_preload and not is_windows
|
|
|
|
|
|
|
|
|
2016-11-12 10:19:28 +03:00
|
|
|
def set_use_shipyard_docker_image(config, flag):
|
|
|
|
# type: (dict, bool) -> None
|
|
|
|
"""Set shipyard docker image use
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param bool flag: flag to set
|
|
|
|
"""
|
|
|
|
config['batch_shipyard']['use_shipyard_docker_image'] = flag
|
|
|
|
|
|
|
|
|
2016-11-11 20:19:04 +03:00
|
|
|
def batch_shipyard_encryption_enabled(config):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Get credential encryption enabled setting
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: if credential encryption is enabled
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
encrypt = config['batch_shipyard']['encryption']['enabled']
|
|
|
|
except KeyError:
|
|
|
|
encrypt = False
|
|
|
|
return encrypt
|
|
|
|
|
|
|
|
|
2016-11-12 02:11:13 +03:00
|
|
|
def set_batch_shipyard_encryption_enabled(config, flag):
|
|
|
|
# type: (dict, bool) -> None
|
|
|
|
"""Set credential encryption enabled setting
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param bool flag: flag to set
|
|
|
|
"""
|
|
|
|
if 'encryption' not in config['batch_shipyard']:
|
|
|
|
config['batch_shipyard']['encryption'] = {}
|
|
|
|
config['batch_shipyard']['encryption']['enabled'] = flag
|
|
|
|
|
|
|
|
|
2016-11-11 20:19:04 +03:00
|
|
|
def batch_shipyard_encryption_pfx_filename(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get filename of pfx cert
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: pfx filename
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
pfxfile = config['batch_shipyard']['encryption']['pfx']['filename']
|
|
|
|
except KeyError:
|
|
|
|
pfxfile = None
|
|
|
|
return pfxfile
|
|
|
|
|
|
|
|
|
|
|
|
def batch_shipyard_encryption_pfx_passphrase(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get passphrase of pfx cert
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: pfx passphrase
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
passphrase = config['batch_shipyard']['encryption'][
|
|
|
|
'pfx']['passphrase']
|
|
|
|
except KeyError:
|
|
|
|
passphrase = None
|
|
|
|
return passphrase
|
|
|
|
|
|
|
|
|
|
|
|
def batch_shipyard_encryption_pfx_sha1_thumbprint(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get sha1 tp of pfx cert
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: pfx sha1 thumbprint
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
tp = config['batch_shipyard']['encryption']['pfx']['sha1_thumbprint']
|
|
|
|
except KeyError:
|
|
|
|
tp = None
|
|
|
|
return tp
|
|
|
|
|
|
|
|
|
|
|
|
def set_batch_shipyard_encryption_pfx_sha1_thumbprint(config, tp):
|
|
|
|
# type: (dict, str) -> None
|
|
|
|
"""Set sha1 tp of pfx cert
|
|
|
|
:param dict config: configuration object
|
|
|
|
"""
|
|
|
|
config['batch_shipyard']['encryption']['pfx']['sha1_thumbprint'] = tp
|
|
|
|
|
|
|
|
|
2016-11-12 20:13:09 +03:00
|
|
|
def batch_shipyard_encryption_public_key_pem(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get filename of pem public key
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: pem filename
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
pem = config['batch_shipyard']['encryption']['public_key_pem']
|
|
|
|
except KeyError:
|
|
|
|
pem = None
|
|
|
|
return pem
|
|
|
|
|
|
|
|
|
2018-06-25 17:49:45 +03:00
|
|
|
def docker_registries(config, images=None):
|
|
|
|
# type: (dict, List[str]) -> list
|
2017-09-30 00:59:15 +03:00
|
|
|
"""Get Docker registries specified
|
2016-11-12 08:51:11 +03:00
|
|
|
:param dict config: configuration object
|
2018-06-25 17:49:45 +03:00
|
|
|
:param list images: list of images to base return
|
2017-09-30 00:59:15 +03:00
|
|
|
:rtype: list
|
|
|
|
:return: list of batchmodels.ContainerRegistry objects
|
2016-11-12 08:51:11 +03:00
|
|
|
"""
|
2017-09-30 00:59:15 +03:00
|
|
|
servers = []
|
2018-06-25 17:49:45 +03:00
|
|
|
if images is None:
|
|
|
|
# get fallback docker registry
|
|
|
|
bs = batch_shipyard_settings(config)
|
|
|
|
if util.is_not_empty(bs.fallback_registry):
|
|
|
|
servers.append(bs.fallback_registry)
|
|
|
|
# get additional docker registries
|
|
|
|
try:
|
|
|
|
servers.extend(
|
|
|
|
config['global_resources']['additional_registries']['docker'])
|
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
images = global_resources_docker_images(config)
|
2017-09-30 00:59:15 +03:00
|
|
|
# parse images for servers
|
|
|
|
for image in images:
|
|
|
|
tmp = image.split('/')
|
|
|
|
if len(tmp) > 1:
|
|
|
|
if '.' in tmp[0] or ':' in tmp[0] and tmp[0] != 'localhost':
|
|
|
|
servers.append(tmp[0])
|
2018-02-15 20:12:54 +03:00
|
|
|
# create unique set
|
|
|
|
servers = set(servers)
|
2017-09-30 00:59:15 +03:00
|
|
|
# get login info for each registry
|
|
|
|
registries = []
|
|
|
|
# add docker hub if found
|
|
|
|
hubuser, hubpw = docker_registry_login(config, 'hub')
|
|
|
|
if util.is_not_empty(hubuser) or util.is_not_empty(hubpw):
|
|
|
|
registries.append(
|
|
|
|
batchmodels.ContainerRegistry(
|
|
|
|
registry_server=None,
|
|
|
|
user_name=hubuser,
|
|
|
|
password=hubpw,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
del hubuser
|
|
|
|
del hubpw
|
|
|
|
for server in servers:
|
2016-11-19 19:54:52 +03:00
|
|
|
user, pw = docker_registry_login(config, server)
|
|
|
|
if util.is_none_or_empty(user) or util.is_none_or_empty(pw):
|
2017-09-30 00:59:15 +03:00
|
|
|
# registries can be public with a specified server
|
|
|
|
continue
|
|
|
|
registries.append(
|
|
|
|
batchmodels.ContainerRegistry(
|
|
|
|
registry_server=server,
|
|
|
|
user_name=user,
|
|
|
|
password=pw,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
return registries
|
2016-11-12 08:51:11 +03:00
|
|
|
|
|
|
|
|
2018-06-25 17:49:45 +03:00
|
|
|
def singularity_registries(config, images=None):
|
|
|
|
# type: (dict, List[str]) -> list
|
2017-10-18 04:51:27 +03:00
|
|
|
"""Get Singularity registries specified
|
|
|
|
:param dict config: configuration object
|
2018-06-25 17:49:45 +03:00
|
|
|
:param list images: list of images to base return
|
2017-10-18 04:51:27 +03:00
|
|
|
:rtype: list
|
|
|
|
:return: list of batchmodels.ContainerRegistry objects
|
|
|
|
"""
|
|
|
|
servers = []
|
2018-06-25 17:49:45 +03:00
|
|
|
if images is None:
|
|
|
|
try:
|
|
|
|
servers.extend(
|
|
|
|
config['global_resources']['additional_registries'][
|
|
|
|
'singularity'])
|
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
images = global_resources_singularity_images(config)
|
2017-10-18 04:51:27 +03:00
|
|
|
# parse images for servers
|
|
|
|
for image in images:
|
2019-06-17 18:29:25 +03:00
|
|
|
_, _, image_name = (
|
|
|
|
image.partition('://'))
|
|
|
|
tmp = image_name.split('/')
|
2017-10-18 04:51:27 +03:00
|
|
|
if len(tmp) > 1:
|
|
|
|
if '.' in tmp[0] or ':' in tmp[0] and tmp[0] != 'localhost':
|
|
|
|
servers.append(tmp[0])
|
2019-06-13 20:22:18 +03:00
|
|
|
# create unique set
|
|
|
|
servers = set(servers)
|
2017-10-18 04:51:27 +03:00
|
|
|
# get login info for each registry
|
|
|
|
registries = []
|
|
|
|
# add docker hub if found and no servers are specified
|
|
|
|
if len(servers) == 0:
|
|
|
|
hubuser, hubpw = docker_registry_login(config, 'hub')
|
|
|
|
if util.is_not_empty(hubuser) or util.is_not_empty(hubpw):
|
|
|
|
registries.append(
|
|
|
|
batchmodels.ContainerRegistry(
|
|
|
|
registry_server=None,
|
|
|
|
user_name=hubuser,
|
|
|
|
password=hubpw,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
del hubuser
|
|
|
|
del hubpw
|
|
|
|
for server in servers:
|
|
|
|
user, pw = singularity_registry_login(config, server)
|
|
|
|
if util.is_none_or_empty(user) or util.is_none_or_empty(pw):
|
|
|
|
# registries can be public with a specified server
|
|
|
|
continue
|
|
|
|
registries.append(
|
|
|
|
batchmodels.ContainerRegistry(
|
|
|
|
registry_server=server,
|
|
|
|
user_name=user,
|
|
|
|
password=pw,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
return registries
|
|
|
|
|
|
|
|
|
2016-11-12 08:51:11 +03:00
|
|
|
def data_replication_settings(config):
|
|
|
|
# type: (dict) -> DataReplicationSettings
|
|
|
|
"""Get data replication settings
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: DataReplicationSettings
|
|
|
|
:return: data replication settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf = config['data_replication']
|
|
|
|
except KeyError:
|
2016-11-12 10:19:28 +03:00
|
|
|
conf = {}
|
2016-11-12 08:51:11 +03:00
|
|
|
try:
|
2017-07-14 19:58:16 +03:00
|
|
|
concurrent_source_downloads = conf['concurrent_source_downloads']
|
2018-01-24 01:23:39 +03:00
|
|
|
if concurrent_source_downloads is None:
|
|
|
|
raise KeyError()
|
2016-11-12 08:51:11 +03:00
|
|
|
except KeyError:
|
2017-07-14 19:58:16 +03:00
|
|
|
concurrent_source_downloads = 10
|
2016-11-12 10:19:28 +03:00
|
|
|
return DataReplicationSettings(
|
2017-07-14 19:58:16 +03:00
|
|
|
concurrent_source_downloads=concurrent_source_downloads,
|
2016-11-12 10:19:28 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
def global_resources_docker_images(config):
|
|
|
|
# type: (dict) -> list
|
|
|
|
"""Get list of docker images
|
2016-11-11 20:19:04 +03:00
|
|
|
:param dict config: configuration object
|
2016-11-12 08:08:58 +03:00
|
|
|
:rtype: list
|
|
|
|
:return: docker images
|
2016-11-11 20:19:04 +03:00
|
|
|
"""
|
|
|
|
try:
|
2016-11-12 08:08:58 +03:00
|
|
|
images = config['global_resources']['docker_images']
|
|
|
|
if util.is_none_or_empty(images):
|
2016-11-11 20:19:04 +03:00
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
2016-11-12 08:08:58 +03:00
|
|
|
images = []
|
|
|
|
return images
|
|
|
|
|
|
|
|
|
2017-10-18 04:51:27 +03:00
|
|
|
def global_resources_singularity_images(config):
|
|
|
|
# type: (dict) -> list
|
2019-06-05 21:14:37 +03:00
|
|
|
"""Get list of all singularity images (signed and unsigned)
|
2017-10-18 04:51:27 +03:00
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: list
|
2019-06-05 21:14:37 +03:00
|
|
|
:return: all singularity images (signed and unsigned)
|
|
|
|
"""
|
2019-11-06 07:14:40 +03:00
|
|
|
singularity_unsigned_images_settings = (
|
|
|
|
global_resources_singularity_images_settings(config, False)
|
|
|
|
)
|
|
|
|
singularity_unsigned_images = [
|
|
|
|
settings.image for settings in singularity_unsigned_images_settings
|
|
|
|
]
|
2019-06-05 21:14:37 +03:00
|
|
|
singularity_signed_images_settings = (
|
2019-11-06 07:14:40 +03:00
|
|
|
global_resources_singularity_images_settings(config, True)
|
|
|
|
)
|
|
|
|
singularity_signed_images = [
|
|
|
|
settings.image for settings in singularity_signed_images_settings
|
|
|
|
]
|
2019-06-05 21:14:37 +03:00
|
|
|
images = singularity_unsigned_images + singularity_signed_images
|
|
|
|
singularity_signed_and_unsigned_images = (
|
|
|
|
set(singularity_unsigned_images).intersection(
|
|
|
|
singularity_signed_images))
|
2019-11-06 07:14:40 +03:00
|
|
|
if len(singularity_signed_and_unsigned_images) > 0:
|
2019-06-05 21:14:37 +03:00
|
|
|
raise ValueError(
|
2019-11-06 07:14:40 +03:00
|
|
|
'image(s) "{}" should not be both signed and unsigned'.format(
|
|
|
|
'", "'.join(singularity_signed_and_unsigned_images)))
|
2017-10-18 04:51:27 +03:00
|
|
|
return images
|
|
|
|
|
|
|
|
|
2019-11-06 07:14:40 +03:00
|
|
|
def global_resources_singularity_images_settings(config, signed):
|
|
|
|
# type: (dict, bool) -> list
|
|
|
|
"""Get list of singularity images settings
|
2019-06-05 21:14:37 +03:00
|
|
|
:param dict config: configuration object
|
2019-11-06 07:14:40 +03:00
|
|
|
:param bool signed: get signed images if True, else unsigned images
|
2019-06-05 21:14:37 +03:00
|
|
|
:rtype: list
|
2019-11-06 07:14:40 +03:00
|
|
|
:return: singularity images settings
|
2019-06-05 21:14:37 +03:00
|
|
|
"""
|
|
|
|
global_resources = _kv_read_checked(config, 'global_resources', default={})
|
2019-11-06 07:14:40 +03:00
|
|
|
images = _kv_read_checked(
|
2019-06-05 21:14:37 +03:00
|
|
|
global_resources, 'singularity_images', default={})
|
2019-11-06 07:14:40 +03:00
|
|
|
singularity_images = _kv_read_checked(
|
|
|
|
images, 'signed' if signed else 'unsigned', default=[])
|
|
|
|
singularity_images_settings = []
|
|
|
|
for settings in singularity_images:
|
2019-06-05 21:14:37 +03:00
|
|
|
image = _kv_read_checked(settings, 'image')
|
2019-11-06 07:14:40 +03:00
|
|
|
if util.is_none_or_empty(image):
|
|
|
|
raise ValueError('singularity image is invalid')
|
|
|
|
key_fingerprint = None
|
2019-06-05 21:14:37 +03:00
|
|
|
key_file_path = None
|
2019-11-06 07:14:40 +03:00
|
|
|
if signed:
|
|
|
|
key = _kv_read_checked(settings, 'signing_key', default={})
|
|
|
|
key_fingerprint = _kv_read_checked(key, 'fingerprint')
|
|
|
|
if util.is_none_or_empty(key_fingerprint):
|
|
|
|
raise ValueError(
|
|
|
|
'key_fingerprint for singularity signed image "{}" is '
|
|
|
|
'invalid'.format(image))
|
|
|
|
key_file = _kv_read_checked(key, 'file')
|
|
|
|
if key_file is not None:
|
|
|
|
key_file_path = pathlib.Path(key_file)
|
|
|
|
if not key_file_path.is_file():
|
|
|
|
raise ValueError(
|
|
|
|
'invalid key file for image "{}"'.format(image))
|
|
|
|
enc = _kv_read_checked(settings, 'encryption', default={})
|
|
|
|
enc_cert = _kv_read_checked(enc, 'certificate', default={})
|
|
|
|
singularity_images_settings.append(
|
|
|
|
SingularityImageSettings(
|
2019-06-05 21:14:37 +03:00
|
|
|
image=image,
|
|
|
|
key_fingerprint=key_fingerprint,
|
|
|
|
key_file=key_file_path,
|
2019-11-06 07:14:40 +03:00
|
|
|
encryption_certificate_sha1_thumbprint=_kv_read_checked(
|
|
|
|
enc_cert, 'sha1_thumbprint'),
|
2019-06-05 21:14:37 +03:00
|
|
|
)
|
|
|
|
)
|
2019-11-06 07:14:40 +03:00
|
|
|
return singularity_images_settings
|
2019-06-05 21:14:37 +03:00
|
|
|
|
|
|
|
|
|
|
|
def singularity_signed_images_key_fingerprint_dict(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get dict of singularity signed images to key fingerprint
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: singularity signed images to key fingerprint
|
|
|
|
"""
|
2019-11-06 07:14:40 +03:00
|
|
|
images_settings = global_resources_singularity_images_settings(
|
|
|
|
config, True)
|
|
|
|
return dict(
|
|
|
|
(settings.image, settings.key_fingerprint)
|
|
|
|
for settings in images_settings
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
def singularity_image_to_encryption_cert_map(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get mapping of image to encryptiong cert thumbprint
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: singularity image name to cert thumbprint
|
|
|
|
"""
|
2019-06-05 21:14:37 +03:00
|
|
|
images_settings = (
|
2019-11-06 07:14:40 +03:00
|
|
|
global_resources_singularity_images_settings(config, False) +
|
|
|
|
global_resources_singularity_images_settings(config, True)
|
|
|
|
)
|
|
|
|
image_map = {}
|
|
|
|
for image in images_settings:
|
|
|
|
if util.is_not_empty(image.encryption_certificate_sha1_thumbprint):
|
|
|
|
image_map[
|
|
|
|
image.image] = image.encryption_certificate_sha1_thumbprint
|
|
|
|
return image_map
|
2019-06-05 21:14:37 +03:00
|
|
|
|
|
|
|
|
2016-11-12 10:19:28 +03:00
|
|
|
def global_resources_files(config):
|
|
|
|
# type: (dict) -> list
|
|
|
|
"""Get list of global files ingress
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: list
|
|
|
|
:return: global files ingress list
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
files = config['global_resources']['files']
|
|
|
|
if util.is_none_or_empty(files):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
files = []
|
|
|
|
return files
|
|
|
|
|
|
|
|
|
|
|
|
def is_direct_transfer(filespair):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Determine if src/dst pair for files ingress is a direct compute node
|
|
|
|
transfer
|
|
|
|
:param dict filespair: src/dst pair
|
|
|
|
:rtype: bool
|
|
|
|
:return: if ingress is direct
|
|
|
|
"""
|
|
|
|
return 'storage_account_settings' not in filespair['destination']
|
|
|
|
|
|
|
|
|
2016-11-12 23:35:56 +03:00
|
|
|
def files_source_settings(conf):
|
|
|
|
# type: (dict) -> SourceSettings
|
|
|
|
"""Get global resources files source
|
|
|
|
:param dict conf: configuration block
|
|
|
|
:rtype: SourceSettings
|
|
|
|
:return: source settings
|
|
|
|
"""
|
2017-10-03 03:29:55 +03:00
|
|
|
source = _kv_read_checked(conf, 'source', default={})
|
|
|
|
path = _kv_read_checked(source, 'path')
|
2016-11-12 23:35:56 +03:00
|
|
|
if util.is_none_or_empty(path):
|
|
|
|
raise ValueError('global resource files path is invalid')
|
2017-10-03 03:29:55 +03:00
|
|
|
return SourceSettings(
|
|
|
|
path=path,
|
|
|
|
include=_kv_read_checked(source, 'include'),
|
|
|
|
exclude=_kv_read_checked(source, 'exclude'),
|
|
|
|
)
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
|
|
|
def files_destination_settings(fdict):
|
|
|
|
# type: (dict) -> DestinationSettings
|
|
|
|
"""Get global resources files destination
|
|
|
|
:param dict fdict: configuration block
|
|
|
|
:rtype: DestinationSettings
|
|
|
|
:return: destination settings
|
|
|
|
"""
|
|
|
|
conf = fdict['destination']
|
|
|
|
try:
|
|
|
|
shared = conf['shared_data_volume']
|
|
|
|
except KeyError:
|
|
|
|
shared = None
|
|
|
|
try:
|
|
|
|
storage = conf['storage_account_settings']
|
|
|
|
except KeyError:
|
|
|
|
storage = None
|
|
|
|
try:
|
|
|
|
rdp = conf['relative_destination_path']
|
|
|
|
if rdp is not None:
|
|
|
|
rdp = rdp.lstrip('/').rstrip('/')
|
|
|
|
if len(rdp) == 0:
|
|
|
|
rdp = None
|
|
|
|
except KeyError:
|
|
|
|
rdp = None
|
2017-10-03 03:29:55 +03:00
|
|
|
data_transfer = _kv_read_checked(conf, 'data_transfer', default={})
|
|
|
|
method = _kv_read_checked(data_transfer, 'method')
|
|
|
|
if util.is_none_or_empty(method):
|
2016-11-12 23:35:56 +03:00
|
|
|
if storage is None:
|
|
|
|
raise RuntimeError(
|
|
|
|
'no transfer method specified for data transfer of '
|
|
|
|
'source: {} to {} rdp={}'.format(
|
|
|
|
files_source_settings(fdict).path, shared, rdp))
|
|
|
|
else:
|
|
|
|
method = None
|
2017-10-03 03:29:55 +03:00
|
|
|
else:
|
|
|
|
method = method.lower()
|
|
|
|
ssh_eo = _kv_read_checked(
|
|
|
|
data_transfer, 'scp_ssh_extra_options', default='')
|
|
|
|
rsync_eo = _kv_read_checked(
|
|
|
|
data_transfer, 'rsync_extra_options', default='')
|
2016-11-12 23:35:56 +03:00
|
|
|
try:
|
2017-10-03 03:29:55 +03:00
|
|
|
mpt = data_transfer['max_parallel_transfers_per_node']
|
2016-11-12 23:35:56 +03:00
|
|
|
if mpt is not None and mpt <= 0:
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
mpt = None
|
|
|
|
# ensure valid mpt number
|
|
|
|
if mpt is None:
|
|
|
|
mpt = 1
|
|
|
|
try:
|
2017-10-03 03:29:55 +03:00
|
|
|
split = data_transfer['split_files_megabytes']
|
2016-11-12 23:35:56 +03:00
|
|
|
if split is not None and split <= 0:
|
|
|
|
raise KeyError()
|
|
|
|
# convert to bytes
|
|
|
|
if split is not None:
|
|
|
|
split <<= 20
|
|
|
|
except KeyError:
|
|
|
|
split = None
|
2017-10-03 03:29:55 +03:00
|
|
|
ssh_private_key = _kv_read_checked(data_transfer, 'ssh_private_key')
|
2017-04-13 23:13:06 +03:00
|
|
|
if util.is_not_empty(ssh_private_key):
|
|
|
|
ssh_private_key = pathlib.Path(ssh_private_key)
|
2016-11-12 23:35:56 +03:00
|
|
|
return DestinationSettings(
|
|
|
|
storage_account_settings=storage,
|
|
|
|
shared_data_volume=shared,
|
|
|
|
relative_destination_path=rdp,
|
|
|
|
data_transfer=DataTransferSettings(
|
2017-10-03 03:29:55 +03:00
|
|
|
is_file_share=data_is_file_share(data_transfer),
|
|
|
|
remote_path=data_remote_path(data_transfer),
|
|
|
|
blobxfer_extra_options=data_blobxfer_extra_options(data_transfer),
|
2016-11-12 23:35:56 +03:00
|
|
|
method=method,
|
|
|
|
ssh_private_key=ssh_private_key,
|
|
|
|
scp_ssh_extra_options=ssh_eo,
|
|
|
|
rsync_extra_options=rsync_eo,
|
|
|
|
split_files_megabytes=split,
|
|
|
|
max_parallel_transfers_per_node=mpt,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-10-17 22:57:02 +03:00
|
|
|
def _global_resources_volumes(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get global resources volumes dictionary
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: volumes
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
vols = config['global_resources']['volumes']
|
|
|
|
if util.is_none_or_empty(vols):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
2018-01-24 01:23:39 +03:00
|
|
|
vols = {}
|
2017-10-17 22:57:02 +03:00
|
|
|
return vols
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
def global_resources_data_volumes(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get data volumes dictionary
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: data volumes
|
|
|
|
"""
|
|
|
|
try:
|
2017-10-17 22:57:02 +03:00
|
|
|
dv = _global_resources_volumes(config)['data_volumes']
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_none_or_empty(dv):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
dv = {}
|
|
|
|
return dv
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
def global_resources_shared_data_volumes(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get shared data volumes dictionary
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: shared data volumes
|
|
|
|
"""
|
|
|
|
try:
|
2017-10-17 22:57:02 +03:00
|
|
|
sdv = _global_resources_volumes(config)['shared_data_volumes']
|
2016-11-12 08:08:58 +03:00
|
|
|
if util.is_none_or_empty(sdv):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
2016-11-12 10:19:28 +03:00
|
|
|
sdv = {}
|
2016-11-12 08:08:58 +03:00
|
|
|
return sdv
|
|
|
|
|
|
|
|
|
|
|
|
def shared_data_volume_driver(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get shared data volume driver
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: volume driver
|
|
|
|
"""
|
|
|
|
return sdv[sdvkey]['volume_driver']
|
|
|
|
|
|
|
|
|
|
|
|
def shared_data_volume_container_path(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get shared data volume container path
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: container path
|
|
|
|
"""
|
|
|
|
return sdv[sdvkey]['container_path']
|
|
|
|
|
|
|
|
|
2017-03-09 07:18:58 +03:00
|
|
|
def shared_data_volume_mount_options(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get shared data volume mount options
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: shared data volume mount options
|
|
|
|
"""
|
|
|
|
try:
|
2018-02-12 21:34:33 +03:00
|
|
|
if is_shared_data_volume_custom_linux_mount(sdv, sdvkey):
|
|
|
|
mo = sdv[sdvkey]['fstab_entry']['fs_mntops']
|
|
|
|
else:
|
|
|
|
mo = sdv[sdvkey]['mount_options']
|
2017-03-09 07:18:58 +03:00
|
|
|
except KeyError:
|
|
|
|
mo = None
|
|
|
|
return mo
|
|
|
|
|
|
|
|
|
2018-01-24 01:23:39 +03:00
|
|
|
def azure_storage_account_settings(sdv, sdvkey):
|
2016-11-12 08:08:58 +03:00
|
|
|
# type: (dict, str) -> str
|
2018-01-24 01:23:39 +03:00
|
|
|
"""Get azure storage account link
|
2016-11-12 08:08:58 +03:00
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: storage account link
|
|
|
|
"""
|
|
|
|
return sdv[sdvkey]['storage_account_settings']
|
|
|
|
|
|
|
|
|
|
|
|
def azure_file_share_name(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get azure file share name
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: azure file share name
|
|
|
|
"""
|
|
|
|
return sdv[sdvkey]['azure_file_share_name']
|
|
|
|
|
|
|
|
|
2018-01-24 01:23:39 +03:00
|
|
|
def azure_blob_container_name(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get azure blob container name
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: azure blob container name
|
|
|
|
"""
|
|
|
|
return sdv[sdvkey]['azure_blob_container_name']
|
|
|
|
|
|
|
|
|
2017-11-05 21:38:22 +03:00
|
|
|
def azure_file_host_mount_path(storage_account_name, share_name, is_windows):
|
|
|
|
# type: (str, str, bool) -> str
|
2017-10-05 03:59:30 +03:00
|
|
|
"""Get azure file share host mount path
|
|
|
|
:param str storage_account_name: storage account name
|
|
|
|
:param str share_name: file share name
|
2017-11-05 21:38:22 +03:00
|
|
|
:param bool is_windows: is windows
|
2017-10-05 03:59:30 +03:00
|
|
|
:rtype: str
|
|
|
|
:return: host mount path for azure file share
|
|
|
|
"""
|
2017-11-05 21:38:22 +03:00
|
|
|
return '{root}{sep}azfile-{sa}-{share}'.format(
|
|
|
|
root=get_host_mounts_path(is_windows),
|
|
|
|
sep='\\' if is_windows else '/',
|
2017-10-05 03:59:30 +03:00
|
|
|
sa=storage_account_name,
|
|
|
|
share=share_name)
|
|
|
|
|
|
|
|
|
2018-01-24 01:23:39 +03:00
|
|
|
def azure_blob_host_mount_path(storage_account_name, container_name):
|
|
|
|
# type: (str, str) -> str
|
|
|
|
"""Get azure blob container host mount path
|
|
|
|
:param str storage_account_name: storage account name
|
|
|
|
:param str container_name: container name
|
|
|
|
:rtype: str
|
|
|
|
    :return: host mount path for azure blob container
|
|
|
|
"""
|
|
|
|
return '{root}/azblob-{sa}-{cont}'.format(
|
|
|
|
root=get_host_mounts_path(False),
|
|
|
|
sa=storage_account_name,
|
|
|
|
cont=container_name)
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
def gluster_volume_type(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get gluster volume type
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: gluster volume type
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
vt = sdv[sdvkey]['volume_type']
|
|
|
|
if util.is_none_or_empty(vt):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
vt = 'replica'
|
|
|
|
return vt
|
|
|
|
|
|
|
|
|
|
|
|
def gluster_volume_options(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get gluster volume options
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: gluster volume options
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
vo = sdv[sdvkey]['volume_options']
|
|
|
|
if util.is_none_or_empty(vo):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
vo = None
|
|
|
|
return vo
|
|
|
|
|
|
|
|
|
2018-02-12 21:34:33 +03:00
|
|
|
def custom_linux_mount_fstab_options(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get custom mount fstab options
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: str
|
|
|
|
:return: custom mount fstab options
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
fstab = sdv[sdvkey]['fstab_entry']
|
|
|
|
if util.is_none_or_empty(fstab):
|
|
|
|
raise KeyError()
|
|
|
|
fs_spec = _kv_read_checked(fstab, 'fs_spec')
|
|
|
|
if util.is_none_or_empty(fs_spec):
|
|
|
|
raise ValueError(
|
|
|
|
('fs_spec for fstab_entry of custom mount {} is '
|
|
|
|
'invalid').format(sdvkey))
|
|
|
|
fs_vfstype = _kv_read_checked(fstab, 'fs_vfstype')
|
|
|
|
if util.is_none_or_empty(fs_vfstype):
|
|
|
|
raise ValueError(
|
|
|
|
('fs_vfstype for fstab_entry of custom mount {} is '
|
|
|
|
'invalid').format(sdvkey))
|
|
|
|
fs_mntops = _kv_read_checked(fstab, 'fs_mntops', default='defaults')
|
|
|
|
fs_freq = _kv_read(fstab, 'fs_freq', default=0)
|
|
|
|
fs_passno = _kv_read(fstab, 'fs_passno', default=0)
|
|
|
|
except KeyError:
|
|
|
|
return None
|
|
|
|
return CustomMountFstabSettings(
|
|
|
|
fs_spec=fs_spec,
|
|
|
|
fs_vfstype=fs_vfstype,
|
|
|
|
fs_mntops=fs_mntops,
|
|
|
|
fs_freq=fs_freq,
|
|
|
|
fs_passno=fs_passno,
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-11-12 08:08:58 +03:00
|
|
|
def is_shared_data_volume_azure_file(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> bool
|
|
|
|
"""Determine if shared data volume is an azure file share
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: bool
|
|
|
|
:return: if shared data volume is azure file
|
|
|
|
"""
|
|
|
|
return shared_data_volume_driver(sdv, sdvkey).lower() == 'azurefile'
|
|
|
|
|
|
|
|
|
2018-01-24 01:23:39 +03:00
|
|
|
def is_shared_data_volume_azure_blob(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> bool
|
|
|
|
"""Determine if shared data volume is an azure blob container via fuse
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: bool
|
|
|
|
:return: if shared data volume is azure blob
|
|
|
|
"""
|
|
|
|
return shared_data_volume_driver(sdv, sdvkey).lower() == 'azureblob'
|
|
|
|
|
|
|
|
|
2017-03-09 07:18:58 +03:00
|
|
|
def is_shared_data_volume_gluster_on_compute(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> bool
|
|
|
|
"""Determine if shared data volume is a glusterfs share on compute
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: bool
|
|
|
|
:return: if shared data volume is glusterfs on compute
|
|
|
|
"""
|
|
|
|
return shared_data_volume_driver(
|
|
|
|
sdv, sdvkey).lower() == 'glusterfs_on_compute'
|
|
|
|
|
|
|
|
|
|
|
|
def is_shared_data_volume_storage_cluster(sdv, sdvkey):
|
2016-11-12 08:08:58 +03:00
|
|
|
# type: (dict, str) -> bool
|
2017-03-09 07:18:58 +03:00
|
|
|
"""Determine if shared data volume is a storage cluster
|
2016-11-12 08:08:58 +03:00
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: bool
|
2017-03-09 07:18:58 +03:00
|
|
|
:return: if shared data volume is storage_cluster
|
2016-11-12 08:08:58 +03:00
|
|
|
"""
|
2017-03-09 07:18:58 +03:00
|
|
|
return shared_data_volume_driver(sdv, sdvkey).lower() == 'storage_cluster'
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
2018-02-12 21:34:33 +03:00
|
|
|
def is_shared_data_volume_custom_linux_mount(sdv, sdvkey):
|
|
|
|
# type: (dict, str) -> bool
|
|
|
|
"""Determine if shared data volume is a custom linux mount
|
|
|
|
:param dict sdv: shared_data_volume configuration object
|
|
|
|
:param str sdvkey: key to sdv
|
|
|
|
:rtype: bool
|
|
|
|
:return: if shared data volume is a custom linux mount
|
|
|
|
"""
|
|
|
|
return (
|
|
|
|
shared_data_volume_driver(sdv, sdvkey).lower() == 'custom_linux_mount'
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
# INPUT AND OUTPUT DATA SETTINGS
|
2016-11-12 23:35:56 +03:00
|
|
|
def input_data(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve input data config block
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: input data config block
|
|
|
|
"""
|
2016-12-01 01:35:44 +03:00
|
|
|
try:
|
|
|
|
id = conf['input_data']
|
|
|
|
if util.is_none_or_empty(id):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
id = None
|
|
|
|
return id
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
|
|
|
def output_data(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve output data config block
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: output data config block
|
|
|
|
"""
|
2016-12-01 01:35:44 +03:00
|
|
|
try:
|
|
|
|
od = conf['output_data']
|
|
|
|
if util.is_none_or_empty(od):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
od = None
|
|
|
|
return od
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
|
|
|
def data_storage_account_settings(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve input data storage account settings link
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: storage account link
|
|
|
|
"""
|
|
|
|
return conf['storage_account_settings']
|
|
|
|
|
|
|
|
|
2017-10-03 03:29:55 +03:00
|
|
|
def data_remote_path(conf):
|
2016-11-12 23:35:56 +03:00
|
|
|
# type: (dict) -> str
|
2017-10-03 03:29:55 +03:00
|
|
|
"""Retrieve remote path on Azure Storage for data transfer
|
2016-11-12 23:35:56 +03:00
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
2017-10-03 03:29:55 +03:00
|
|
|
:return: remote path
|
2016-11-12 23:35:56 +03:00
|
|
|
"""
|
2017-10-03 03:29:55 +03:00
|
|
|
return _kv_read_checked(conf, 'remote_path')
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
2017-10-03 03:29:55 +03:00
|
|
|
def data_container_from_remote_path(conf, rp=None):
|
|
|
|
# type: (dict, str) -> str
|
|
|
|
"""Get Container or File share name from remote path
|
2016-11-12 23:35:56 +03:00
|
|
|
:param dict conf: configuration object
|
2017-10-03 03:29:55 +03:00
|
|
|
:param str rp: remote path
|
2016-11-12 23:35:56 +03:00
|
|
|
:rtype: str
|
2017-10-03 03:29:55 +03:00
|
|
|
:return: container/fshare name
|
|
|
|
"""
|
|
|
|
if rp is None:
|
|
|
|
rp = data_remote_path(conf)
|
|
|
|
if util.is_none_or_empty(rp):
|
|
|
|
raise ValueError(
|
2017-10-04 18:59:03 +03:00
|
|
|
'cannot derive container name from invalid remote_path')
|
2017-10-03 03:29:55 +03:00
|
|
|
return rp.split('/')[0]
|
|
|
|
|
|
|
|
|
|
|
|
def data_local_path(conf, on_task, task_wd=True):
    # type: (dict, bool, bool) -> str
|
|
|
|
"""Retrieve local path for data transfer
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:param bool on_task: if input data is on the task spec
|
|
|
|
:param bool task_wd: if path is not specified use task working dir, else
|
|
|
|
use task dir
|
|
|
|
:rtype: str
|
|
|
|
:return: local path
|
2016-11-12 23:35:56 +03:00
|
|
|
"""
|
|
|
|
try:
|
2017-10-03 03:29:55 +03:00
|
|
|
dst = conf['local_path']
|
|
|
|
if util.is_none_or_empty(dst):
|
2016-11-12 23:35:56 +03:00
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
2017-10-03 03:29:55 +03:00
|
|
|
if on_task:
|
|
|
|
if task_wd:
|
|
|
|
dst = '$AZ_BATCH_TASK_WORKING_DIR'
|
|
|
|
else:
|
|
|
|
dst = '$AZ_BATCH_TASK_DIR'
|
|
|
|
else:
|
2019-09-05 22:29:33 +03:00
|
|
|
raise ValueError(
|
|
|
|
'local_path not specified for input_data or output_data '
|
|
|
|
'for storage_account_settings {}'.format(
|
|
|
|
conf['storage_account_settings']))
|
2017-10-03 03:29:55 +03:00
|
|
|
return dst
|
|
|
|
|
|
|
|
|
|
|
|
def data_is_file_share(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Retrieve if data transfer originates/destined for file share
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: is Azure file share
|
|
|
|
"""
|
|
|
|
return _kv_read(conf, 'is_file_share', default=False)
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
|
|
|
def data_blobxfer_extra_options(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve input data blobxfer extra options
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: blobxfer extra options
|
|
|
|
"""
|
2017-10-03 03:29:55 +03:00
|
|
|
return _kv_read_checked(conf, 'blobxfer_extra_options', default='')
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
2017-10-03 03:29:55 +03:00
|
|
|
def data_include(conf):
|
2016-11-12 23:35:56 +03:00
|
|
|
    # type: (dict) -> list
    """Retrieve input data include filters
    :param dict conf: configuration object
    :rtype: list
|
2017-10-03 03:29:55 +03:00
|
|
|
:return: include filters
|
2016-11-12 23:35:56 +03:00
|
|
|
"""
|
2017-11-15 08:26:52 +03:00
|
|
|
return _kv_read_checked(conf, 'include', [])
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
2017-10-03 03:29:55 +03:00
|
|
|
def data_exclude(conf):
|
|
|
|
    # type: (dict) -> list
    """Retrieve input data exclude filters
    :param dict conf: configuration object
    :rtype: list
|
2017-10-03 03:29:55 +03:00
|
|
|
:return: exclude filters
|
2016-11-12 23:35:56 +03:00
|
|
|
"""
|
2017-11-15 08:26:52 +03:00
|
|
|
return _kv_read_checked(conf, 'exclude', [])
|
2016-11-12 23:35:56 +03:00
|
|
|
|
|
|
|
|
2019-06-24 21:03:43 +03:00
|
|
|
def data_condition(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve output data condition
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: condition
|
|
|
|
"""
|
|
|
|
return _kv_read_checked(conf, 'condition', default='tasksuccess')
|
|
|
|
|
|
|
|
|
2016-11-12 23:35:56 +03:00
|
|
|
def input_data_job_id(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve input data job id
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: job id
|
|
|
|
"""
|
|
|
|
return conf['job_id']
|
|
|
|
|
|
|
|
|
|
|
|
def input_data_task_id(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Retrieve input data task id
|
|
|
|
:param dict conf: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: task id
|
|
|
|
"""
|
|
|
|
return conf['task_id']
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
# JOBS SETTINGS
|
|
|
|
def job_specifications(config):
|
|
|
|
# type: (dict) -> dict
|
|
|
|
"""Get job specifications config block
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: job specifications
|
|
|
|
"""
|
2018-02-17 00:56:46 +03:00
|
|
|
try:
|
|
|
|
return config['job_specifications']
|
|
|
|
except KeyError:
|
|
|
|
raise ValueError(
|
|
|
|
'job_specifications is not found or invalid, did you specify a '
|
|
|
|
'jobs configuration file?')
|
2016-11-13 09:13:55 +03:00
|
|
|
|
|
|
|
|
2019-11-14 05:59:02 +03:00
|
|
|
def autogenerated_task_id_settings(config, level=None):
|
|
|
|
# type: (dict, str) -> Tuple[str, str]
|
2017-08-08 18:24:15 +03:00
|
|
|
"""Get the autogenerated task id prefix to use
|
2017-07-31 22:46:41 +03:00
|
|
|
:param dict config: configuration object
|
2019-11-14 05:59:02 +03:00
|
|
|
:param str level: 'global', 'job', or 'task_factory'
|
|
|
|
:rtype: tuple
|
|
|
|
:return: (auto-gen task id prefix, auto-gen task id zfill)
|
|
|
|
"""
|
|
|
|
if level == 'global':
|
|
|
|
conf = config['batch_shipyard']
|
|
|
|
elif level == 'job':
|
|
|
|
conf = config
|
|
|
|
elif level == 'task_factory':
|
|
|
|
conf = config['task_factory']
|
|
|
|
else:
|
|
|
|
raise RuntimeError(
|
|
|
|
'invalid level={} for autogenerated task id setting'.format(level))
|
|
|
|
conf = _kv_read_checked(conf, 'autogenerated_task_id', {})
|
2017-08-08 18:24:15 +03:00
|
|
|
# do not use _kv_read_checked for prefix we want to allow empty string
|
|
|
|
try:
|
|
|
|
prefix = conf['prefix']
|
|
|
|
if prefix is None:
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
prefix = 'task-'
|
2019-11-14 05:59:02 +03:00
|
|
|
padding = _kv_read(conf, 'zfill_width', 5)
|
|
|
|
return (prefix, padding)
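
# A minimal sketch of how the returned (prefix, zfill width) tuple can be
# combined into a generated task id; 'task_number' is a hypothetical counter
# and this helper is not part of this module:
#
#   prefix, padding = autogenerated_task_id_settings(config, level='global')
#   task_id = '{}{}'.format(prefix, str(task_number).zfill(padding))
#   # with the defaults ('task-', 5) and task_number == 3 -> 'task-00003'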
|
2017-07-31 22:46:41 +03:00
|
|
|
|
|
|
|
|
2017-08-04 20:19:04 +03:00
|
|
|
def job_tasks(config, conf):
|
|
|
|
# type: (dict, dict) -> list
|
2016-11-13 09:13:55 +03:00
|
|
|
"""Get all tasks for job
|
|
|
|
:param dict config: configuration object
|
2017-08-04 20:19:04 +03:00
|
|
|
:param dict conf: job configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: list
|
|
|
|
:return: list of tasks
|
|
|
|
"""
|
2019-11-14 05:59:02 +03:00
|
|
|
if 'autogenerated_task_id' in conf:
|
|
|
|
prefix, padding = autogenerated_task_id_settings(conf, level='job')
|
|
|
|
else:
|
|
|
|
prefix, padding = autogenerated_task_id_settings(
|
|
|
|
config, level='global')
|
2017-07-28 01:01:41 +03:00
|
|
|
for _task in conf['tasks']:
|
|
|
|
if 'task_factory' in _task:
|
2017-08-04 20:19:04 +03:00
|
|
|
# get storage settings if applicable
|
|
|
|
if 'file' in _task['task_factory']:
|
|
|
|
az = _task['task_factory']['file']['azure_storage']
|
2017-11-10 19:24:50 +03:00
|
|
|
drp = data_remote_path(az)
|
2017-08-04 20:19:04 +03:00
|
|
|
tfstorage = TaskFactoryStorageSettings(
|
|
|
|
storage_settings=credentials_storage(
|
|
|
|
config, data_storage_account_settings(az)),
|
|
|
|
storage_link_name=az['storage_account_settings'],
|
2017-11-10 19:24:50 +03:00
|
|
|
container=data_container_from_remote_path(None, drp),
|
|
|
|
remote_path=drp,
|
2017-10-03 03:29:55 +03:00
|
|
|
is_file_share=data_is_file_share(az),
|
2017-08-04 20:19:04 +03:00
|
|
|
include=_kv_read_checked(az, 'include'),
|
|
|
|
exclude=_kv_read_checked(az, 'exclude'),
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
tfstorage = None
|
2019-11-14 05:59:02 +03:00
|
|
|
# get autogenerated task id settings
|
|
|
|
if 'autogenerated_task_id' in _task['task_factory']:
|
|
|
|
tfprefix, tfpadding = autogenerated_task_id_settings(
|
|
|
|
_task, level='task_factory')
|
|
|
|
else:
|
|
|
|
tfprefix = prefix
|
|
|
|
tfpadding = padding
|
2017-08-04 20:19:04 +03:00
|
|
|
for task in task_factory.generate_task(_task, tfstorage):
|
2018-03-13 18:20:08 +03:00
|
|
|
task['##tfgen'] = True
|
2019-11-14 05:59:02 +03:00
|
|
|
task['##task_id_prefix'] = tfprefix
|
|
|
|
task['##task_id_padding'] = tfpadding
|
2017-07-28 01:01:41 +03:00
|
|
|
yield task
|
|
|
|
else:
|
2019-11-14 05:59:02 +03:00
|
|
|
_task['##task_id_prefix'] = prefix
|
|
|
|
_task['##task_id_padding'] = padding
|
2017-07-28 01:01:41 +03:00
|
|
|
yield _task
|
2016-11-13 09:13:55 +03:00
|
|
|
|
|
|
|
|
|
|
|
def job_id(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get job id of a job specification
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: job id
|
|
|
|
"""
|
|
|
|
return conf['id']
|
|
|
|
|
|
|
|
|
2017-04-03 20:47:52 +03:00
|
|
|
def job_auto_complete(conf):
|
2016-11-13 09:13:55 +03:00
|
|
|
# type: (dict) -> bool
|
2017-04-03 20:47:52 +03:00
|
|
|
"""Get job (and multi-instance) autocomplete setting
|
2016-11-13 09:13:55 +03:00
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: bool
|
2017-04-03 20:47:52 +03:00
|
|
|
:return: job autocomplete
|
2016-11-13 09:13:55 +03:00
|
|
|
"""
|
|
|
|
try:
|
2017-04-03 20:47:52 +03:00
|
|
|
ac = conf['auto_complete']
|
2016-11-13 09:13:55 +03:00
|
|
|
except KeyError:
|
2017-04-03 20:47:52 +03:00
|
|
|
ac = False
|
|
|
|
return ac
|
2016-11-13 09:13:55 +03:00
|
|
|
|
|
|
|
|
2017-07-21 20:32:04 +03:00
|
|
|
def job_auto_pool(conf):
|
|
|
|
# type: (dict) -> PoolAutopoolSettings
|
|
|
|
"""Get job autopool setting
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: PoolAutopoolSettings
|
|
|
|
:return: job autopool settings
|
|
|
|
"""
|
|
|
|
ap = _kv_read_checked(conf, 'auto_pool')
|
|
|
|
if ap is not None:
|
|
|
|
return PoolAutopoolSettings(
|
|
|
|
pool_lifetime=_kv_read_checked(
|
|
|
|
ap, 'pool_lifetime', 'job').lower(),
|
|
|
|
keep_alive=_kv_read(ap, 'keep_alive', False),
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
2017-08-08 05:42:09 +03:00
|
|
|
def job_recurrence(conf):
|
|
|
|
# type: (dict) -> JobRecurrenceSettings
|
|
|
|
"""Get job recurrence setting
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: JobRecurrenceSettings
|
|
|
|
:return: job recurrence settings
|
|
|
|
"""
|
|
|
|
rec = _kv_read_checked(conf, 'recurrence')
|
|
|
|
if rec is not None:
|
|
|
|
do_not_run_until = _kv_read_checked(
|
|
|
|
rec['schedule'], 'do_not_run_until')
|
|
|
|
if do_not_run_until is not None:
|
|
|
|
do_not_run_until = dateutil.parser.parse(do_not_run_until)
|
|
|
|
do_not_run_after = _kv_read_checked(
|
|
|
|
rec['schedule'], 'do_not_run_after')
|
|
|
|
if do_not_run_after is not None:
|
|
|
|
do_not_run_after = dateutil.parser.parse(do_not_run_after)
|
|
|
|
start_window = _kv_read_checked(rec['schedule'], 'start_window')
|
|
|
|
if start_window is not None:
|
|
|
|
start_window = util.convert_string_to_timedelta(start_window)
|
2017-08-08 18:24:15 +03:00
|
|
|
recurrence_interval = util.convert_string_to_timedelta(
|
|
|
|
_kv_read_checked(rec['schedule'], 'recurrence_interval')
|
|
|
|
)
|
2017-08-08 05:42:09 +03:00
|
|
|
jm = _kv_read_checked(rec, 'job_manager', {})
|
|
|
|
return JobRecurrenceSettings(
|
|
|
|
schedule=JobScheduleSettings(
|
|
|
|
do_not_run_until=do_not_run_until,
|
|
|
|
do_not_run_after=do_not_run_after,
|
|
|
|
start_window=start_window,
|
|
|
|
recurrence_interval=recurrence_interval,
|
|
|
|
),
|
|
|
|
job_manager=JobManagerSettings(
|
|
|
|
allow_low_priority_node=_kv_read(
|
|
|
|
jm, 'allow_low_priority_node', True),
|
|
|
|
run_exclusive=_kv_read(jm, 'run_exclusive', False),
|
2017-08-09 06:29:46 +03:00
|
|
|
monitor_task_completion=_kv_read(
|
|
|
|
jm, 'monitor_task_completion', False),
|
2017-08-08 05:42:09 +03:00
|
|
|
)
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
2017-07-20 19:34:44 +03:00
|
|
|
def job_priority(conf):
|
|
|
|
# type: (dict) -> int
|
|
|
|
"""Get job priority setting
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
    :rtype: int
    :return: job priority
|
|
|
|
"""
|
|
|
|
pri = _kv_read(conf, 'priority', 0)
|
|
|
|
if pri < -1000 or pri > 1000:
|
|
|
|
raise ValueError('job priority is invalid: {}'.format(pri))
|
|
|
|
return pri
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
def job_environment_variables(conf):
|
|
|
|
    # type: (dict) -> dict
    """Get env vars of a job specification
    :param dict conf: job configuration object
    :rtype: dict
    :return: job env vars
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
env_vars = conf['environment_variables']
|
|
|
|
if util.is_none_or_empty(env_vars):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
2017-01-19 21:15:32 +03:00
|
|
|
env_vars = {}
|
2016-11-13 09:13:55 +03:00
|
|
|
return env_vars
|
|
|
|
|
|
|
|
|
2017-01-25 00:54:26 +03:00
|
|
|
def job_environment_variables_keyvault_secret_id(conf):
|
2017-01-19 21:15:32 +03:00
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get keyvault env vars of a job specification
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
    :rtype: str
    :return: environment variables keyvault secret id
|
|
|
|
"""
|
|
|
|
try:
|
2017-01-25 00:54:26 +03:00
|
|
|
secid = conf['environment_variables_keyvault_secret_id']
|
2017-01-19 21:15:32 +03:00
|
|
|
if util.is_none_or_empty(secid):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
secid = None
|
|
|
|
return secid
|
|
|
|
|
|
|
|
|
2017-01-24 18:46:52 +03:00
|
|
|
def job_max_task_retries(conf):
|
|
|
|
# type: (dict) -> int
|
|
|
|
"""Get number of times a task should be retried in a particular job
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: int
|
|
|
|
:return: max task retry count
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
max_task_retries = conf['max_task_retries']
|
|
|
|
if max_task_retries is None:
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
max_task_retries = None
|
|
|
|
return max_task_retries
|
|
|
|
|
|
|
|
|
2017-05-23 19:29:00 +03:00
|
|
|
def job_max_wall_time(conf):
|
|
|
|
    # type: (dict) -> datetime.timedelta
|
|
|
|
"""Get maximum wall time for any task of a job
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: datetime.timedelta
|
|
|
|
:return: max wall time
|
|
|
|
"""
|
|
|
|
max_wall_time = _kv_read_checked(conf, 'max_wall_time')
|
|
|
|
if util.is_not_empty(max_wall_time):
|
|
|
|
max_wall_time = util.convert_string_to_timedelta(max_wall_time)
|
|
|
|
return max_wall_time
|
|
|
|
|
|
|
|
|
2017-03-10 01:38:16 +03:00
|
|
|
def job_allow_run_on_missing(conf):
|
|
|
|
    # type: (dict) -> bool
|
|
|
|
"""Get allow task run on missing image
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: allow run on missing image
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
allow = conf['allow_run_on_missing_image']
|
|
|
|
if allow is None:
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
allow = False
|
|
|
|
return allow
|
|
|
|
|
|
|
|
|
2018-11-01 02:06:00 +03:00
|
|
|
def job_requires_auto_scratch(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Get job auto scratch setting
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: job auto scratch
|
|
|
|
"""
|
|
|
|
return _kv_read(conf, 'auto_scratch', default=False)
|
|
|
|
|
|
|
|
|
2019-06-24 19:02:30 +03:00
|
|
|
def job_preparation_command(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get arbitrary job preparation command
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: job prep command
|
|
|
|
"""
|
|
|
|
return _kv_read_checked(
|
|
|
|
_kv_read_checked(conf, 'job_preparation', default={}),
|
|
|
|
'command')
|
|
|
|
|
|
|
|
|
|
|
|
def job_release_command(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get arbitrary job release command
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: job release command
|
|
|
|
"""
|
|
|
|
return _kv_read_checked(
|
|
|
|
_kv_read_checked(conf, 'job_release', default={}),
|
|
|
|
'command')
|
|
|
|
|
|
|
|
|
2018-06-25 17:49:45 +03:00
|
|
|
def job_federation_constraint_settings(conf, federation_id):
|
|
|
|
# type: (dict, str) -> dict
|
|
|
|
"""Gets federation constraints
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:param str federation_id: federation id
|
|
|
|
:rtype: dict
|
|
|
|
:return: federation constraints
|
|
|
|
"""
|
|
|
|
if util.is_none_or_empty(federation_id):
|
|
|
|
return None
|
|
|
|
fc_conf = _kv_read_checked(conf, 'federation_constraints', default={})
|
|
|
|
pool_conf = _kv_read_checked(fc_conf, 'pool', default={})
|
|
|
|
native = _kv_read(pool_conf, 'native')
|
|
|
|
windows = _kv_read(pool_conf, 'windows')
|
|
|
|
if windows and native is not None and not native:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot set constraint windows as true and native as false')
|
|
|
|
pool_location = _kv_read_checked(pool_conf, 'location')
|
|
|
|
if pool_location is not None:
|
|
|
|
if ' ' in pool_location:
|
|
|
|
raise ValueError(
|
|
|
|
'pool:location "{}" is invalid, please ensure proper region '
|
|
|
|
'name and not its display name'.format(pool_location))
|
|
|
|
pool_location = pool_location.lower()
|
|
|
|
pool_custom_image_arm_id = _kv_read_checked(
|
|
|
|
pool_conf, 'custom_image_arm_id')
|
|
|
|
if pool_custom_image_arm_id is not None:
|
|
|
|
pool_custom_image_arm_id = pool_custom_image_arm_id.lower()
|
|
|
|
pool_virtual_network_arm_id = _kv_read_checked(
|
|
|
|
pool_conf, 'virtual_network_arm_id')
|
|
|
|
if pool_virtual_network_arm_id is not None:
|
|
|
|
pool_virtual_network_arm_id = pool_virtual_network_arm_id.lower()
|
|
|
|
pool_lp_conf = _kv_read_checked(
|
|
|
|
pool_conf, 'low_priority_nodes', default={})
|
|
|
|
lp_allow = _kv_read(pool_lp_conf, 'allow', default=True)
|
|
|
|
lp_exclusive = _kv_read(pool_lp_conf, 'exclusive', default=False)
|
|
|
|
if not lp_allow and lp_exclusive:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot set constraint low_priority:allow to false and '
|
|
|
|
'low_priority:exclusive to true')
|
|
|
|
pool_as_conf = _kv_read_checked(
|
|
|
|
pool_conf, 'autoscale', default={})
|
|
|
|
autoscale_allow = _kv_read(pool_as_conf, 'allow', default=True)
|
|
|
|
autoscale_exclusive = _kv_read(pool_as_conf, 'exclusive', default=False)
|
|
|
|
if not autoscale_allow and autoscale_exclusive:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot set constraint autoscale:allow to false and '
|
|
|
|
'autoscale:exclusive to true')
|
|
|
|
pool_reg_conf = _kv_read_checked(
|
|
|
|
pool_conf, 'container_registries', default={})
|
|
|
|
pool_mab_conf = _kv_read_checked(
|
|
|
|
pool_conf, 'max_active_task_backlog', default={})
|
|
|
|
matbr = _kv_read(pool_mab_conf, 'ratio')
|
|
|
|
if matbr is not None:
|
|
|
|
matbr = float(matbr)
|
|
|
|
if matbr < 0:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot set constraint max_active_task_backlog:ratio to '
|
|
|
|
'a negative value')
|
|
|
|
matbae = _kv_read(
|
|
|
|
pool_mab_conf, 'autoscale_exempt', default=True)
|
|
|
|
node_conf = _kv_read_checked(fc_conf, 'compute_node', default={})
|
|
|
|
vm_size = _kv_read_checked(node_conf, 'vm_size')
|
|
|
|
if vm_size is not None:
|
|
|
|
vm_size = vm_size.lower()
|
|
|
|
core_conf = _kv_read_checked(node_conf, 'cores', default={})
|
|
|
|
node_cores = _kv_read(core_conf, 'amount')
|
|
|
|
if util.is_not_empty(vm_size) and node_cores is not None:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify both vm_size and cores for compute_node '
|
|
|
|
'constraint')
|
|
|
|
node_core_variance = _kv_read(core_conf, 'schedulable_variance')
|
|
|
|
if node_core_variance is not None:
|
|
|
|
node_core_variance = float(node_core_variance)
|
|
|
|
if node_core_variance < 0:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify a negative cores:schedulable_variance')
|
|
|
|
memory_conf = _kv_read(node_conf, 'memory', default={})
|
|
|
|
node_memory = _kv_read_checked(memory_conf, 'amount')
|
|
|
|
if util.is_not_empty(vm_size) and node_memory is not None:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify both vm_size and memory for compute_node '
|
|
|
|
'constraint')
|
|
|
|
if node_memory is not None:
|
|
|
|
node_memory = node_memory.lower()
|
|
|
|
if node_memory[-1] not in ('b', 'k', 'm', 'g', 't'):
|
|
|
|
raise ValueError(
|
|
|
|
'federation_constraints:compute_node:memory has invalid '
|
|
|
|
'suffix')
|
|
|
|
if int(node_memory[:-1]) <= 0:
|
|
|
|
raise ValueError(
|
|
|
|
'federation_constraints:compute_node:memory is a '
|
|
|
|
'non-positive value')
|
|
|
|
node_memory_variance = _kv_read(memory_conf, 'schedulable_variance')
|
|
|
|
if node_memory_variance is not None:
|
|
|
|
node_memory_variance = float(node_memory_variance)
|
|
|
|
if node_memory_variance < 0:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify a negative memory:schedulable_variance')
|
|
|
|
node_gpu = _kv_read(node_conf, 'gpu')
|
|
|
|
if node_gpu and util.is_not_empty(vm_size) and not is_gpu_pool(vm_size):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify gpu=True while vm_size does not have GPUs')
|
|
|
|
node_ib = _kv_read(node_conf, 'infiniband')
|
|
|
|
if node_ib and util.is_not_empty(vm_size) and not is_rdma_pool(vm_size):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify infiniband=True while vm_size does not have '
|
|
|
|
'RDMA/IB')
|
|
|
|
return FederationConstraintSettings(
|
|
|
|
pool=FederationPoolConstraintSettings(
|
|
|
|
native=native,
|
|
|
|
windows=windows,
|
|
|
|
location=pool_location,
|
|
|
|
custom_image_arm_id=pool_custom_image_arm_id,
|
|
|
|
virtual_network_arm_id=pool_virtual_network_arm_id,
|
|
|
|
low_priority_nodes_allow=lp_allow,
|
|
|
|
low_priority_nodes_exclusive=lp_exclusive,
|
|
|
|
autoscale_allow=autoscale_allow,
|
|
|
|
autoscale_exclusive=autoscale_exclusive,
|
|
|
|
container_registries_private_docker_hub=_kv_read(
|
|
|
|
pool_reg_conf, 'private_docker_hub', default=False),
|
|
|
|
container_registries_public=_kv_read_checked(
|
|
|
|
pool_reg_conf, 'public'),
|
|
|
|
max_active_task_backlog_ratio=matbr,
|
|
|
|
max_active_task_backlog_autoscale_exempt=matbae,
|
|
|
|
),
|
|
|
|
compute_node=FederationComputeNodeConstraintSettings(
|
|
|
|
vm_size=vm_size,
|
|
|
|
cores=node_cores,
|
|
|
|
core_variance=node_core_variance,
|
|
|
|
memory=node_memory,
|
|
|
|
memory_variance=node_memory_variance,
|
|
|
|
exclusive=_kv_read(node_conf, 'exclusive', default=False),
|
|
|
|
gpu=node_gpu,
|
|
|
|
infiniband=node_ib,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2018-01-24 22:30:34 +03:00
|
|
|
def job_has_merge_task(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Determines if job has a merge task
|
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: job has merge task
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
merge = conf['merge_task']
|
|
|
|
except KeyError:
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
if any(x in merge for x in _FORBIDDEN_MERGE_TASK_PROPERTIES):
|
|
|
|
raise ValueError(
|
|
|
|
'merge_task has one or more forbidden properties: {}'.format(
|
|
|
|
_FORBIDDEN_MERGE_TASK_PROPERTIES))
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
2019-11-17 07:07:48 +03:00
|
|
|
def job_merge_task(config, conf):
|
2018-01-24 22:30:34 +03:00
|
|
|
    # type: (dict, dict) -> dict
|
|
|
|
"""Gets merge task
|
2019-11-17 07:07:48 +03:00
|
|
|
:param dict config: configuration object
|
2018-01-24 22:30:34 +03:00
|
|
|
:param dict conf: job configuration object
|
|
|
|
:rtype: dict
|
|
|
|
:return: merge task
|
|
|
|
"""
|
2019-11-17 07:07:48 +03:00
|
|
|
if 'autogenerated_task_id' in conf:
|
|
|
|
prefix, padding = autogenerated_task_id_settings(conf, level='job')
|
|
|
|
else:
|
|
|
|
prefix, padding = autogenerated_task_id_settings(
|
|
|
|
config, level='global')
|
|
|
|
_task = conf['merge_task']
|
|
|
|
_task['##task_id_prefix'] = prefix
|
|
|
|
_task['##task_id_padding'] = padding
|
|
|
|
return _task
|
2018-01-24 22:30:34 +03:00
|
|
|
|
|
|
|
|
2018-11-29 19:31:47 +03:00
|
|
|
def job_force_enable_task_dependencies(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""If task dependencies are force enabled
|
|
|
|
    :param dict conf: job configuration object
    :rtype: bool
    :return: if task dependencies are force enabled for the job
|
|
|
|
"""
|
|
|
|
return _kv_read(conf, 'force_enable_task_dependencies', default=False)
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
def has_depends_on_task(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Determines if task has task dependencies
|
2018-02-27 02:06:55 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: bool
|
|
|
|
:return: task has task dependencies
|
|
|
|
"""
|
2017-01-12 20:23:25 +03:00
|
|
|
if ('depends_on' in conf and util.is_not_empty(conf['depends_on']) or
|
|
|
|
'depends_on_range' in conf and
|
|
|
|
util.is_not_empty(conf['depends_on_range'])):
|
2018-03-13 18:20:08 +03:00
|
|
|
if (('id' not in conf or util.is_none_or_empty(conf['id'])) and
|
|
|
|
('##tfgen' not in conf or not conf['##tfgen'])):
|
2016-11-13 09:13:55 +03:00
|
|
|
raise ValueError(
|
2017-01-12 20:23:25 +03:00
|
|
|
'task id is not specified, but depends_on or '
|
|
|
|
'depends_on_range is set')
|
2016-11-13 09:13:55 +03:00
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
2018-03-16 19:25:35 +03:00
|
|
|
def has_task_exit_condition_job_action(jobspec, conf):
|
|
|
|
# type: (dict, dict) -> bool
|
2018-02-27 02:06:55 +03:00
|
|
|
"""Determines if task has task exit condition job action
|
2018-03-16 19:25:35 +03:00
|
|
|
:param dict jobspec: job configuration object
|
2018-02-27 02:06:55 +03:00
|
|
|
:param dict conf: task configuration object
|
|
|
|
:rtype: bool
|
|
|
|
:return: task has exit condition job action
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf['exit_conditions']['default']['exit_options']['job_action']
|
|
|
|
except KeyError:
|
2018-03-16 19:25:35 +03:00
|
|
|
try:
|
|
|
|
jobspec['exit_conditions']['default']['exit_options']['job_action']
|
|
|
|
except KeyError:
|
|
|
|
return False
|
2018-02-27 02:06:55 +03:00
|
|
|
return True
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
def is_multi_instance_task(conf):
|
|
|
|
# type: (dict) -> bool
|
|
|
|
"""Determines if task is multi-isntance
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: bool
|
|
|
|
:return: task is multi-instance
|
|
|
|
"""
|
|
|
|
return 'multi_instance' in conf
|
|
|
|
|
|
|
|
|
|
|
|
def task_name(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get task name
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: str
|
|
|
|
:return: task name
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
name = conf['name']
|
|
|
|
if util.is_none_or_empty(name):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
name = None
|
|
|
|
return name
|
|
|
|
|
|
|
|
|
2017-03-10 01:38:16 +03:00
|
|
|
def task_docker_image(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get docker image used by task
|
|
|
|
:param dict conf: task configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: docker image used by task
|
|
|
|
"""
|
2017-09-29 08:13:14 +03:00
|
|
|
return (
|
|
|
|
_kv_read_checked(conf, 'docker_image') or
|
|
|
|
_kv_read_checked(conf, 'image')
|
|
|
|
)
|
2017-03-10 01:38:16 +03:00
|
|
|
|
|
|
|
|
2017-10-21 06:15:46 +03:00
|
|
|
def task_singularity_image(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get singularity image used by task
|
|
|
|
:param dict conf: task configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: singularity image used by task
|
|
|
|
"""
|
|
|
|
return _kv_read_checked(conf, 'singularity_image')
|
|
|
|
|
|
|
|
|
2016-11-13 09:13:55 +03:00
|
|
|
def set_task_name(conf, name):
|
|
|
|
# type: (dict, str) -> None
|
|
|
|
"""Set task name
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:param str name: task name to set
|
|
|
|
"""
|
|
|
|
conf['name'] = name
|
|
|
|
|
|
|
|
|
|
|
|
def task_id(conf):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get task id
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: str
|
|
|
|
:return: task id
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
id = conf['id']
|
|
|
|
if util.is_none_or_empty(id):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
id = None
|
|
|
|
return id
|
|
|
|
|
|
|
|
|
|
|
|
def set_task_id(conf, id):
|
|
|
|
# type: (dict, str) -> None
|
|
|
|
"""Set task id
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2016-11-13 09:13:55 +03:00
|
|
|
:param str id: task id to set
|
|
|
|
"""
|
|
|
|
conf['id'] = id
|
|
|
|
|
|
|
|
|
2018-06-25 17:49:45 +03:00
|
|
|
def task_settings(
|
|
|
|
cloud_pool, config, poolconf, jobspec, conf, federation_id=None):
|
2017-03-14 18:52:09 +03:00
|
|
|
# type: (azure.batch.models.CloudPool, dict, PoolSettings, dict,
|
2018-06-25 17:49:45 +03:00
|
|
|
# dict, str) -> TaskSettings
|
2016-11-13 09:13:55 +03:00
|
|
|
"""Get task settings
|
2017-03-10 01:38:16 +03:00
|
|
|
:param azure.batch.models.CloudPool cloud_pool: cloud pool object
|
2016-11-13 09:13:55 +03:00
|
|
|
:param dict config: configuration dict
|
2017-03-10 01:38:16 +03:00
|
|
|
:param PoolSettings poolconf: pool settings
|
2017-03-14 18:52:09 +03:00
|
|
|
:param dict jobspec: job specification
|
2017-03-10 01:38:16 +03:00
|
|
|
:param dict conf: task configuration object
|
2018-06-25 17:49:45 +03:00
|
|
|
:param str federation_id: federation id
|
2016-11-13 09:13:55 +03:00
|
|
|
:rtype: TaskSettings
|
|
|
|
:return: task settings
|
|
|
|
"""
|
2017-09-19 23:37:21 +03:00
|
|
|
native = is_native_docker_pool(config, vm_config=poolconf.vm_configuration)
|
2017-11-03 09:24:45 +03:00
|
|
|
is_windows = is_windows_pool(config, vm_config=poolconf.vm_configuration)
|
2016-11-13 09:13:55 +03:00
|
|
|
# id must be populated by the time this function is invoked
|
|
|
|
task_id = conf['id']
|
|
|
|
if util.is_none_or_empty(task_id):
|
|
|
|
raise ValueError('task id is invalid')
|
2017-08-10 18:11:46 +03:00
|
|
|
# check task id length
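    # (the Azure Batch service limits task ids to 64 characters)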
|
|
|
|
if len(task_id) > 64:
|
|
|
|
raise ValueError('task id exceeds 64 characters')
|
2017-10-21 06:15:46 +03:00
|
|
|
docker_image = task_docker_image(conf)
|
|
|
|
singularity_image = _kv_read_checked(conf, 'singularity_image')
|
|
|
|
if (util.is_none_or_empty(docker_image) and
|
|
|
|
util.is_none_or_empty(singularity_image)):
|
|
|
|
raise ValueError('Container image is unspecified or invalid')
|
|
|
|
if (util.is_not_empty(docker_image) and
|
|
|
|
util.is_not_empty(singularity_image)):
|
|
|
|
raise ValueError(
|
|
|
|
'Cannot specify both a Docker and Singularity image for a task')
|
|
|
|
if util.is_not_empty(singularity_image) and native:
|
|
|
|
raise ValueError(
|
|
|
|
'Cannot run Singularity containers on native container '
|
|
|
|
'support pools')
|
2017-11-03 09:24:45 +03:00
|
|
|
if is_windows and util.is_not_empty(singularity_image):
|
|
|
|
raise ValueError(
|
|
|
|
'Cannot run Singularity containers on windows pools')
|
2016-11-13 09:13:55 +03:00
|
|
|
# get some pool props
|
2017-12-09 03:28:25 +03:00
|
|
|
publisher = None
|
|
|
|
offer = None
|
|
|
|
node_agent = None
|
2017-03-10 01:38:16 +03:00
|
|
|
if cloud_pool is None:
|
|
|
|
pool_id = poolconf.id
|
|
|
|
vm_size = poolconf.vm_size
|
|
|
|
inter_node_comm = poolconf.inter_node_communication_enabled
|
2017-10-04 18:59:03 +03:00
|
|
|
is_custom_image = not is_platform_image(
|
|
|
|
config, vm_config=poolconf.vm_configuration)
|
2017-06-06 18:29:44 +03:00
|
|
|
if is_custom_image:
|
|
|
|
node_agent = poolconf.vm_configuration.node_agent
|
|
|
|
else:
|
2017-07-21 20:32:04 +03:00
|
|
|
publisher = poolconf.vm_configuration.publisher.lower()
|
|
|
|
offer = poolconf.vm_configuration.offer.lower()
|
2017-03-10 01:38:16 +03:00
|
|
|
else:
|
|
|
|
pool_id = cloud_pool.id
|
|
|
|
vm_size = cloud_pool.vm_size.lower()
|
|
|
|
inter_node_comm = cloud_pool.enable_inter_node_communication
|
2017-11-06 19:12:49 +03:00
|
|
|
is_custom_image = util.is_not_empty(
|
|
|
|
cloud_pool.virtual_machine_configuration.image_reference.
|
|
|
|
virtual_machine_image_id)
|
2017-06-06 18:29:44 +03:00
|
|
|
if is_custom_image:
|
|
|
|
node_agent = cloud_pool.virtual_machine_configuration.\
|
|
|
|
node_agent_sku_id.lower()
|
|
|
|
else:
|
|
|
|
publisher = cloud_pool.virtual_machine_configuration.\
|
|
|
|
image_reference.publisher.lower()
|
|
|
|
offer = cloud_pool.virtual_machine_configuration.\
|
|
|
|
image_reference.offer.lower()
|
2018-06-25 17:49:45 +03:00
|
|
|
# get federation job constraint overrides
|
|
|
|
if util.is_not_empty(federation_id):
|
|
|
|
fed_constraints = job_federation_constraint_settings(
|
|
|
|
jobspec, federation_id)
|
|
|
|
if fed_constraints.pool.native is not None:
|
|
|
|
native = fed_constraints.pool.native
|
|
|
|
if fed_constraints.pool.windows is not None:
|
|
|
|
is_windows = fed_constraints.pool.windows
|
|
|
|
is_custom_image = util.is_not_empty(
|
|
|
|
fed_constraints.pool.custom_image_arm_id)
|
|
|
|
if is_multi_instance_task(conf):
|
|
|
|
inter_node_comm = True
|
|
|
|
else:
|
|
|
|
fed_constraints = None
|
2016-11-13 09:13:55 +03:00
|
|
|
# get depends on
|
|
|
|
try:
|
|
|
|
depends_on = conf['depends_on']
|
|
|
|
if util.is_none_or_empty(depends_on):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
depends_on = None
|
2017-01-12 20:23:25 +03:00
|
|
|
try:
|
|
|
|
depends_on_range = conf['depends_on_range']
|
|
|
|
if util.is_none_or_empty(depends_on_range):
|
|
|
|
raise KeyError()
|
|
|
|
if len(depends_on_range) != 2:
|
|
|
|
raise ValueError('depends_on_range requires 2 elements exactly')
|
2017-04-29 02:11:54 +03:00
|
|
|
if not (isinstance(depends_on_range[0], int) and
|
|
|
|
isinstance(depends_on_range[1], int)):
|
2017-01-12 20:23:25 +03:00
|
|
|
raise ValueError('depends_on_range requires integral members only')
|
|
|
|
except KeyError:
|
|
|
|
depends_on_range = None
|
2016-11-13 09:13:55 +03:00
|
|
|
# get additional resource files
|
|
|
|
try:
|
|
|
|
rfs = conf['resource_files']
|
|
|
|
if util.is_none_or_empty(rfs):
|
|
|
|
raise KeyError()
|
|
|
|
resource_files = []
|
|
|
|
for rf in rfs:
|
|
|
|
try:
|
|
|
|
fm = rf['file_mode']
|
|
|
|
if util.is_none_or_empty(fm):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
fm = None
|
|
|
|
resource_files.append(
|
|
|
|
ResourceFileSettings(
|
|
|
|
file_path=rf['file_path'],
|
|
|
|
blob_source=rf['blob_source'],
|
|
|
|
file_mode=fm,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
except KeyError:
|
|
|
|
resource_files = None
|
|
|
|
# get generic run opts
|
2017-10-25 06:07:00 +03:00
|
|
|
docker_exec_options = []
|
2017-10-21 06:15:46 +03:00
|
|
|
singularity_cmd = None
|
2017-10-22 23:59:00 +03:00
|
|
|
run_elevated = True
|
2017-10-21 06:15:46 +03:00
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
run_opts = _kv_read_checked(
|
|
|
|
conf, 'additional_docker_run_options', default=[])
|
2017-10-25 06:07:00 +03:00
|
|
|
if '--privileged' in run_opts:
|
|
|
|
docker_exec_options.append('--privileged')
|
2017-10-21 06:15:46 +03:00
|
|
|
else:
|
|
|
|
run_opts = _kv_read_checked(
|
|
|
|
conf, 'additional_singularity_options', default=[])
|
2017-10-22 23:59:00 +03:00
|
|
|
singularity_execution = _kv_read_checked(
|
|
|
|
conf, 'singularity_execution', default={})
|
2017-10-21 06:15:46 +03:00
|
|
|
singularity_cmd = _kv_read_checked(
|
2017-10-22 23:59:00 +03:00
|
|
|
singularity_execution, 'cmd', default='exec')
|
|
|
|
run_elevated = _kv_read(
|
|
|
|
singularity_execution, 'elevated', default=False)
|
2017-10-21 06:15:46 +03:00
|
|
|
if singularity_cmd not in _SINGULARITY_COMMANDS:
|
|
|
|
raise ValueError('singularity_cmd is invalid: {}'.format(
|
|
|
|
singularity_cmd))
|
|
|
|
# docker specific options
|
|
|
|
name = None
|
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
# parse remove container option
|
2018-01-26 00:37:29 +03:00
|
|
|
rm_container = _kv_read(conf, 'remove_container_after_exit')
|
|
|
|
if rm_container is None:
|
|
|
|
rm_container = _kv_read(
|
|
|
|
jobspec, 'remove_container_after_exit', default=True)
|
2017-10-21 06:15:46 +03:00
|
|
|
if rm_container and '--rm' not in run_opts:
|
|
|
|
run_opts.append('--rm')
|
|
|
|
del rm_container
|
|
|
|
# parse /dev/shm option
|
|
|
|
shm_size = (
|
|
|
|
_kv_read(conf, 'shm_size') or
|
|
|
|
_kv_read_checked(jobspec, 'shm_size')
|
|
|
|
)
|
|
|
|
if (util.is_not_empty(shm_size) and
|
|
|
|
not any(x.startswith('--shm-size=') for x in run_opts)):
|
|
|
|
run_opts.append('--shm-size={}'.format(shm_size))
|
|
|
|
del shm_size
|
|
|
|
# parse name option, if not specified use task id
|
|
|
|
name = _kv_read_checked(conf, 'name')
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_none_or_empty(name):
|
2017-10-21 06:15:46 +03:00
|
|
|
name = task_id
|
|
|
|
set_task_name(conf, name)
|
|
|
|
run_opts.append('--name {}'.format(name))
|
|
|
|
# parse labels option
|
|
|
|
labels = _kv_read_checked(conf, 'labels')
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_not_empty(labels):
|
|
|
|
for label in labels:
|
|
|
|
run_opts.append('-l {}'.format(label))
|
|
|
|
del labels
|
2017-10-21 06:15:46 +03:00
|
|
|
# parse ports option
|
|
|
|
ports = _kv_read_checked(conf, 'ports')
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_not_empty(ports):
|
|
|
|
for port in ports:
|
|
|
|
run_opts.append('-p {}'.format(port))
|
|
|
|
del ports
|
2017-10-21 06:15:46 +03:00
|
|
|
# parse entrypoint
|
|
|
|
entrypoint = _kv_read_checked(conf, 'entrypoint')
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_not_empty(entrypoint):
|
|
|
|
run_opts.append('--entrypoint {}'.format(entrypoint))
|
|
|
|
del entrypoint
|
2017-10-21 06:15:46 +03:00
|
|
|
# get user identity settings
|
2017-11-03 09:24:45 +03:00
|
|
|
if not is_windows:
|
|
|
|
ui = _kv_read_checked(jobspec, 'user_identity', {})
|
|
|
|
ui_default_pool_admin = _kv_read(ui, 'default_pool_admin', False)
|
|
|
|
ui_specific = _kv_read(ui, 'specific_user', {})
|
|
|
|
ui_specific_uid = _kv_read(ui_specific, 'uid')
|
|
|
|
ui_specific_gid = _kv_read(ui_specific, 'gid')
|
|
|
|
del ui
|
|
|
|
del ui_specific
|
|
|
|
if ui_default_pool_admin and ui_specific_uid is not None:
|
2017-10-21 06:15:46 +03:00
|
|
|
raise ValueError(
|
2017-11-03 09:24:45 +03:00
|
|
|
'cannot specify both default_pool_admin and '
|
|
|
|
'specific_user:uid/gid at the same time')
|
|
|
|
ui = UserIdentitySettings(
|
|
|
|
default_pool_admin=ui_default_pool_admin,
|
|
|
|
specific_user_uid=ui_specific_uid,
|
|
|
|
specific_user_gid=ui_specific_gid,
|
|
|
|
)
|
|
|
|
# append user identity options
|
|
|
|
uiopt = None
|
|
|
|
attach_ui = False
|
|
|
|
if ui.default_pool_admin:
|
|
|
|
# run as the default pool admin user. note that this is
|
|
|
|
# *undocumented* behavior and may break at anytime
|
|
|
|
uiopt = '-u `id -u _azbatch`:`id -g _azbatch`'
|
|
|
|
attach_ui = True
|
|
|
|
elif ui.specific_user_uid is not None:
|
|
|
|
if ui.specific_user_gid is None:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify a user identity uid without a gid')
|
|
|
|
uiopt = '-u {}:{}'.format(
|
|
|
|
ui.specific_user_uid, ui.specific_user_gid)
|
|
|
|
attach_ui = True
|
|
|
|
if util.is_not_empty(uiopt):
|
|
|
|
run_opts.append(uiopt)
|
|
|
|
docker_exec_options.append(uiopt)
|
|
|
|
if attach_ui:
|
|
|
|
run_opts.append('-v /etc/passwd:/etc/passwd:ro')
|
|
|
|
run_opts.append('-v /etc/group:/etc/group:ro')
|
|
|
|
run_opts.append('-v /etc/sudoers:/etc/sudoers:ro')
|
|
|
|
del attach_ui
|
|
|
|
del ui
|
|
|
|
del uiopt
|
2016-11-13 09:13:55 +03:00
|
|
|
# get command
|
2017-10-21 06:15:46 +03:00
|
|
|
command = _kv_read_checked(conf, 'command')
|
2016-11-13 09:13:55 +03:00
|
|
|
# parse data volumes
|
2017-05-23 19:29:00 +03:00
|
|
|
data_volumes = _kv_read_checked(jobspec, 'data_volumes')
|
2017-10-21 06:15:46 +03:00
|
|
|
tdv = _kv_read_checked(conf, 'data_volumes')
|
|
|
|
if util.is_not_empty(tdv):
|
|
|
|
if util.is_not_empty(data_volumes):
|
|
|
|
# check for intersection
|
|
|
|
if len(set(data_volumes).intersection(set(tdv))) > 0:
|
|
|
|
raise ValueError('data volumes must be unique')
|
|
|
|
data_volumes.extend(tdv)
|
|
|
|
else:
|
|
|
|
data_volumes = tdv
|
|
|
|
del tdv
|
|
|
|
# binding order matters for Singularity
|
|
|
|
bindparm = '-v' if util.is_not_empty(docker_image) else '-B'
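    # '-v' is the docker bind mount flag; '-B' is the singularity equivalent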
|
2018-04-25 18:19:27 +03:00
|
|
|
# get working dir default
|
|
|
|
def_wd = _kv_read_checked(
|
|
|
|
conf, 'default_working_dir',
|
|
|
|
default=_kv_read_checked(jobspec, 'default_working_dir')
|
|
|
|
)
|
|
|
|
if util.is_none_or_empty(def_wd) or def_wd == 'batch':
|
|
|
|
if is_windows:
|
|
|
|
def_wd = '%AZ_BATCH_TASK_WORKING_DIR%'
|
|
|
|
else:
|
|
|
|
def_wd = '$AZ_BATCH_TASK_WORKING_DIR'
|
2019-06-25 01:06:20 +03:00
|
|
|
# set working directory if not already set
|
|
|
|
if def_wd != 'container':
|
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
if not any((x.startswith('-w ') or x.startswith('--workdir '))
|
|
|
|
for x in run_opts):
|
|
|
|
run_opts.append('-w {}'.format(def_wd))
|
|
|
|
else:
|
|
|
|
if not any(x.startswith('--pwd ') for x in run_opts):
|
|
|
|
run_opts.append('--pwd {}'.format(def_wd))
|
|
|
|
working_dir = (
|
|
|
|
batchmodels.ContainerWorkingDirectory.task_working_directory
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
working_dir = (
|
|
|
|
batchmodels.ContainerWorkingDirectory.container_image_default
|
|
|
|
)
|
|
|
|
del def_wd
|
2017-10-21 06:15:46 +03:00
|
|
|
# bind root dir and set working dir
|
|
|
|
if not native:
|
2018-10-31 00:22:27 +03:00
|
|
|
restrict_bind = _kv_read(
|
|
|
|
jobspec, 'restrict_default_bind_mounts', default=False)
|
|
|
|
if restrict_bind:
|
|
|
|
# mount task directory only
|
|
|
|
if is_windows:
|
|
|
|
run_opts.append(
|
|
|
|
'{} %AZ_BATCH_TASK_DIR%:%AZ_BATCH_TASK_DIR%'.format(
|
|
|
|
bindparm))
|
|
|
|
else:
|
|
|
|
run_opts.append(
|
|
|
|
'{} $AZ_BATCH_TASK_DIR:$AZ_BATCH_TASK_DIR'.format(
|
|
|
|
bindparm))
|
2017-11-03 09:24:45 +03:00
|
|
|
else:
|
2018-10-31 00:22:27 +03:00
|
|
|
# mount batch root dir
|
|
|
|
if is_windows:
|
|
|
|
run_opts.append(
|
|
|
|
'{} %AZ_BATCH_NODE_ROOT_DIR%:'
|
|
|
|
'%AZ_BATCH_NODE_ROOT_DIR%'.format(bindparm))
|
|
|
|
else:
|
|
|
|
run_opts.append(
|
|
|
|
'{} $AZ_BATCH_NODE_ROOT_DIR:'
|
|
|
|
'$AZ_BATCH_NODE_ROOT_DIR'.format(bindparm))
|
2017-05-23 19:29:00 +03:00
|
|
|
if util.is_not_empty(data_volumes):
|
2016-11-13 09:13:55 +03:00
|
|
|
dv = global_resources_data_volumes(config)
|
|
|
|
for dvkey in data_volumes:
|
|
|
|
try:
|
2017-10-21 06:15:46 +03:00
|
|
|
hostpath = _kv_read_checked(dv[dvkey], 'host_path')
|
2016-11-13 09:13:55 +03:00
|
|
|
except KeyError:
|
2017-10-21 06:15:46 +03:00
|
|
|
raise ValueError(
|
|
|
|
('ensure that the {} data volume exists in the '
|
|
|
|
'global configuration').format(dvkey))
|
|
|
|
bindopt = _kv_read_checked(dv[dvkey], 'bind_options', default='')
|
|
|
|
if util.is_not_empty(bindopt):
|
|
|
|
bindopt = ':{}'.format(bindopt)
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_not_empty(hostpath):
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.append('{} {}:{}{}'.format(
|
|
|
|
bindparm, hostpath, dv[dvkey]['container_path'], bindopt))
|
2016-11-13 09:13:55 +03:00
|
|
|
else:
|
2017-10-21 06:15:46 +03:00
|
|
|
if util.is_not_empty(bindopt):
|
|
|
|
run_opts.append('{bp} {cp}:{cp}{bo}'.format(
|
|
|
|
bp=bindparm, cp=dv[dvkey]['container_path'],
|
|
|
|
bo=bindopt))
|
|
|
|
else:
|
|
|
|
run_opts.append('{} {}'.format(
|
|
|
|
bindparm, dv[dvkey]['container_path']))
|
2017-05-23 19:29:00 +03:00
|
|
|
del data_volumes
|
2016-11-13 09:13:55 +03:00
|
|
|
# parse shared data volumes
|
2017-05-23 19:29:00 +03:00
|
|
|
shared_data_volumes = _kv_read_checked(jobspec, 'shared_data_volumes')
|
2017-10-21 06:15:46 +03:00
|
|
|
tsdv = _kv_read_checked(conf, 'shared_data_volumes')
|
|
|
|
if util.is_not_empty(tsdv):
|
|
|
|
if util.is_not_empty(shared_data_volumes):
|
|
|
|
# check for intersection
|
|
|
|
if len(set(shared_data_volumes).intersection(set(tsdv))) > 0:
|
|
|
|
raise ValueError('shared data volumes must be unique')
|
|
|
|
shared_data_volumes.extend(tsdv)
|
|
|
|
else:
|
|
|
|
shared_data_volumes = tsdv
|
|
|
|
del tsdv
|
2018-11-01 02:06:00 +03:00
|
|
|
if job_requires_auto_scratch(jobspec):
|
|
|
|
run_opts.append(
|
|
|
|
'{} {}/auto_scratch/{}:$AZ_BATCH_TASK_DIR/auto_scratch'.format(
|
|
|
|
bindparm,
|
|
|
|
_HOST_MOUNTS_DIR,
|
|
|
|
jobspec['id']))
|
2017-05-23 19:29:00 +03:00
|
|
|
if util.is_not_empty(shared_data_volumes):
|
2016-11-13 09:13:55 +03:00
|
|
|
sdv = global_resources_shared_data_volumes(config)
|
|
|
|
for sdvkey in shared_data_volumes:
|
2017-10-21 06:15:46 +03:00
|
|
|
try:
|
|
|
|
bindopt = _kv_read_checked(
|
|
|
|
sdv[sdvkey], 'bind_options', default='')
|
|
|
|
except KeyError:
|
|
|
|
raise ValueError(
|
|
|
|
('ensure that the {} shared data volume exists in the '
|
|
|
|
'global configuration').format(sdvkey))
|
2017-12-09 03:28:25 +03:00
|
|
|
if util.is_not_empty(bindopt):
|
|
|
|
bindopt = ':{}'.format(bindopt)
|
2017-03-09 07:18:58 +03:00
|
|
|
if is_shared_data_volume_gluster_on_compute(sdv, sdvkey):
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.append('{} {}/{}:{}{}'.format(
|
|
|
|
bindparm,
|
2017-10-05 03:59:30 +03:00
|
|
|
_HOST_MOUNTS_DIR,
|
2017-03-12 06:15:14 +03:00
|
|
|
get_gluster_on_compute_volume(),
|
2017-10-21 06:15:46 +03:00
|
|
|
shared_data_volume_container_path(sdv, sdvkey),
|
|
|
|
bindopt))
|
2017-03-13 12:10:08 +03:00
|
|
|
elif is_shared_data_volume_storage_cluster(sdv, sdvkey):
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.append('{} {}/{}:{}{}'.format(
|
|
|
|
bindparm,
|
2017-10-05 03:59:30 +03:00
|
|
|
_HOST_MOUNTS_DIR,
|
2017-03-13 12:10:08 +03:00
|
|
|
sdvkey,
|
2017-10-21 06:15:46 +03:00
|
|
|
shared_data_volume_container_path(sdv, sdvkey),
|
|
|
|
bindopt))
|
2018-01-24 01:23:39 +03:00
|
|
|
elif is_shared_data_volume_azure_blob(sdv, sdvkey):
|
2017-10-05 03:59:30 +03:00
|
|
|
sa = credentials_storage(
|
|
|
|
config,
|
2018-01-24 01:23:39 +03:00
|
|
|
azure_storage_account_settings(sdv, sdvkey))
|
|
|
|
cont_name = azure_blob_container_name(sdv, sdvkey)
|
|
|
|
hmp = azure_blob_host_mount_path(sa.account, cont_name)
|
|
|
|
run_opts.append('{} {}:{}{}'.format(
|
|
|
|
bindparm,
|
|
|
|
hmp,
|
|
|
|
shared_data_volume_container_path(sdv, sdvkey),
|
|
|
|
bindopt))
|
|
|
|
elif is_shared_data_volume_azure_file(sdv, sdvkey):
|
|
|
|
sa = credentials_storage(
|
|
|
|
config,
|
|
|
|
azure_storage_account_settings(sdv, sdvkey))
|
2017-10-05 03:59:30 +03:00
|
|
|
share_name = azure_file_share_name(sdv, sdvkey)
|
2017-11-05 21:38:22 +03:00
|
|
|
hmp = azure_file_host_mount_path(
|
|
|
|
sa.account, share_name, is_windows)
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.append('{} {}:{}{}'.format(
|
|
|
|
bindparm,
|
|
|
|
hmp,
|
|
|
|
shared_data_volume_container_path(sdv, sdvkey),
|
|
|
|
bindopt))
|
2018-02-12 21:34:33 +03:00
|
|
|
elif is_shared_data_volume_custom_linux_mount(sdv, sdvkey):
|
|
|
|
run_opts.append('{} {}/{}:{}{}'.format(
|
|
|
|
bindparm,
|
|
|
|
_HOST_MOUNTS_DIR,
|
|
|
|
sdvkey,
|
|
|
|
shared_data_volume_container_path(sdv, sdvkey),
|
|
|
|
bindopt))
|
2018-01-24 01:23:39 +03:00
|
|
|
else:
|
|
|
|
raise RuntimeError(
|
|
|
|
'unknown type for shared data volume: {}'.format(sdvkey))
|
2017-05-23 19:29:00 +03:00
|
|
|
del shared_data_volumes
|
2016-11-13 09:13:55 +03:00
|
|
|
# env vars
|
2017-10-21 06:15:46 +03:00
|
|
|
env_vars = _kv_read_checked(conf, 'environment_variables', default={})
|
|
|
|
ev_secid = _kv_read_checked(
|
|
|
|
conf, 'environment_variables_keyvault_secret_id')
|
2019-06-19 23:11:09 +03:00
|
|
|
# singularity login
|
|
|
|
if util.is_not_empty(singularity_image):
|
|
|
|
registry_type, _, image_name = singularity_image.partition('://')
|
2019-06-21 20:36:28 +03:00
|
|
|
if (registry_type.lower() == 'oras' or
|
|
|
|
registry_type.lower() == 'docker'):
|
2019-06-19 23:11:09 +03:00
|
|
|
registry = image_name.partition('/')[0]
|
|
|
|
username, password = singularity_registry_login(config, registry)
|
|
|
|
if username is not None and password is not None:
|
|
|
|
env_vars['SINGULARITY_DOCKER_USERNAME'] = username
|
|
|
|
env_vars['SINGULARITY_DOCKER_PASSWORD'] = password
|
2019-11-06 07:14:40 +03:00
|
|
|
singularity_cert_map = singularity_image_to_encryption_cert_map(config)
|
|
|
|
cert = singularity_cert_map.get(singularity_image)
|
|
|
|
if cert is not None:
|
|
|
|
# use a run option rather than an env var so the az batch env var can
# expand to the cert path
|
|
|
|
run_opts.append(
|
|
|
|
'--pem-path=$AZ_BATCH_NODE_STARTUP_DIR/certs/'
|
|
|
|
'sha1-{}-rsa.pem'.format(cert))
|
2017-10-21 06:15:46 +03:00
|
|
|
# constraints
|
|
|
|
max_task_retries = _kv_read(conf, 'max_task_retries')
|
|
|
|
max_wall_time = _kv_read_checked(conf, 'max_wall_time')
|
|
|
|
if util.is_not_empty(max_wall_time):
|
|
|
|
max_wall_time = util.convert_string_to_timedelta(max_wall_time)
|
|
|
|
retention_time = (
|
|
|
|
_kv_read_checked(conf, 'retention_time') or
|
|
|
|
_kv_read_checked(jobspec, 'retention_time')
|
|
|
|
)
|
2017-05-23 19:29:00 +03:00
|
|
|
if util.is_not_empty(retention_time):
|
|
|
|
retention_time = util.convert_string_to_timedelta(retention_time)
|
2018-02-27 02:06:55 +03:00
|
|
|
# exit conditions, right now specific exit codes/ranges are not supported
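# the default exit options are read from exit_conditions:default:
# exit_options at both the job and task level; task-level settings,
# when present, take precedence over the job-level defaults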
|
2018-03-16 19:25:35 +03:00
|
|
|
job_default_eo = _kv_read_checked(
|
|
|
|
_kv_read_checked(
|
|
|
|
_kv_read_checked(
|
|
|
|
jobspec,
|
|
|
|
'exit_conditions',
|
|
|
|
default={}
|
|
|
|
),
|
|
|
|
'default',
|
|
|
|
default={}
|
|
|
|
),
|
|
|
|
'exit_options',
|
|
|
|
default={}
|
|
|
|
)
|
|
|
|
task_default_eo = _kv_read_checked(
|
|
|
|
_kv_read_checked(
|
|
|
|
_kv_read_checked(
|
|
|
|
conf,
|
|
|
|
'exit_conditions',
|
|
|
|
default={}
|
|
|
|
),
|
|
|
|
'default',
|
|
|
|
default={}
|
|
|
|
),
|
|
|
|
'exit_options',
|
|
|
|
default={}
|
|
|
|
)
|
2018-02-27 02:06:55 +03:00
|
|
|
job_action = batchmodels.JobAction(
|
2018-03-16 19:25:35 +03:00
|
|
|
_kv_read_checked(
|
|
|
|
task_default_eo,
|
|
|
|
'job_action',
|
|
|
|
default=batchmodels.JobAction(
|
|
|
|
_kv_read_checked(job_default_eo, 'job_action', default='none')
|
|
|
|
)
|
|
|
|
)
|
|
|
|
)
|
2018-02-27 02:06:55 +03:00
|
|
|
dependency_action = batchmodels.DependencyAction(
|
2018-03-16 19:25:35 +03:00
|
|
|
_kv_read_checked(
|
|
|
|
task_default_eo,
|
|
|
|
'dependency_action',
|
|
|
|
default=batchmodels.DependencyAction(
|
|
|
|
_kv_read_checked(
|
|
|
|
job_default_eo, 'dependency_action', default='block')
|
|
|
|
)
|
|
|
|
)
|
|
|
|
)
|
2016-11-13 09:13:55 +03:00
|
|
|
# gpu
|
2019-08-08 23:36:41 +03:00
|
|
|
gpu = _kv_read(conf, 'gpus')
|
2018-01-26 00:37:29 +03:00
|
|
|
if gpu is None:
|
2019-08-08 23:36:41 +03:00
|
|
|
gpu = _kv_read(jobspec, 'gpus')
|
|
|
|
if gpu is not None:
|
|
|
|
gpu = str(gpu)
|
2017-09-27 18:41:17 +03:00
|
|
|
# if not specified check for gpu pool and implicitly enable
|
2019-08-08 23:36:41 +03:00
|
|
|
if util.is_none_or_empty(gpu):
|
2017-11-03 09:24:45 +03:00
|
|
|
if is_gpu_pool(vm_size) and not is_windows:
|
2019-08-08 23:36:41 +03:00
|
|
|
gpu = 'all'
|
2017-09-27 18:41:17 +03:00
|
|
|
else:
|
2019-08-08 23:36:41 +03:00
|
|
|
gpu = 'disable'
|
2016-11-13 09:13:55 +03:00
|
|
|
# adjust for gpu settings
|
2019-08-08 23:36:41 +03:00
|
|
|
if util.is_not_empty(gpu) and gpu != 'disable':
|
2018-06-25 17:49:45 +03:00
|
|
|
if util.is_not_empty(federation_id):
|
|
|
|
# ensure that the job-level constraint does not conflict with
|
|
|
|
# job/task level requirements
|
|
|
|
if (fed_constraints.compute_node.gpu is not None and
|
|
|
|
not fed_constraints.compute_node.gpu):
|
|
|
|
raise ValueError(
|
|
|
|
'job or task requirement of gpu conflicts with '
|
|
|
|
'compute_node:gpu federation constraint')
|
2017-03-10 01:38:16 +03:00
|
|
|
if not is_gpu_pool(vm_size):
|
2016-11-13 09:13:55 +03:00
|
|
|
raise RuntimeError(
|
|
|
|
('cannot initialize a gpu task on nodes without '
|
2017-09-27 18:41:17 +03:00
|
|
|
'gpus: pool={} vm_size={}').format(pool_id, vm_size))
|
2019-08-08 23:36:41 +03:00
|
|
|
if is_windows:
|
|
|
|
raise ValueError(
|
|
|
|
'cannot execute a gpu task on a windows pool: {}'.format(
|
|
|
|
pool_id))
|
|
|
|
# set gpu options for the container runtime (singularity --nv or
# docker --gpus)
|
2017-10-21 06:15:46 +03:00
|
|
|
if util.is_not_empty(singularity_image):
|
2019-08-08 23:36:41 +03:00
|
|
|
if gpu != 'all':
|
|
|
|
raise ValueError(
|
|
|
|
'cannot execute a singularity container with non-default '
|
|
|
|
'gpu options; only "all" is supported')
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.append('--nv')
|
2019-06-12 22:23:08 +03:00
|
|
|
else:
|
2019-08-08 23:36:41 +03:00
|
|
|
# batch native mode will take care of using the proper runtime
|
|
|
|
# TODO once native mode supports new native docker gpu support
|
|
|
|
# this should change and support gpus options
|
|
|
|
if not native:
|
|
|
|
run_opts.append('--gpus={}'.format(gpu))
|
2017-09-27 18:41:17 +03:00
|
|
|
# infiniband
|
2018-01-26 00:37:29 +03:00
|
|
|
infiniband = _kv_read(conf, 'infiniband')
|
|
|
|
if infiniband is None:
|
|
|
|
infiniband = _kv_read(jobspec, 'infiniband')
|
2017-09-27 18:41:17 +03:00
|
|
|
# if not specified, check for rdma pool and implicitly enable
|
|
|
|
if infiniband is None:
|
2017-11-03 09:24:45 +03:00
|
|
|
if is_rdma_pool(vm_size) and inter_node_comm and not is_windows:
|
2017-09-27 18:41:17 +03:00
|
|
|
infiniband = True
|
|
|
|
else:
|
|
|
|
infiniband = False
|
2016-11-13 09:13:55 +03:00
|
|
|
# adjust for infiniband
|
2017-09-26 21:08:37 +03:00
|
|
|
if infiniband:
|
2018-06-25 17:49:45 +03:00
|
|
|
# adjust ib with fed constraints (normalize to a known base config)
|
|
|
|
if util.is_not_empty(federation_id):
|
|
|
|
# ensure that the job-level constraint does not conflict with
|
|
|
|
# job/task level requirements
|
|
|
|
if (fed_constraints.compute_node.infiniband is not None and
|
|
|
|
not fed_constraints.compute_node.infiniband):
|
|
|
|
raise ValueError(
|
|
|
|
'job or task requirement of infiniband conflicts with '
|
|
|
|
'compute_node:infiniband federation constraint')
|
|
|
|
# set publisher and offer (or node agent)
|
|
|
|
if infiniband:
|
|
|
|
if is_custom_image:
|
|
|
|
node_agent = 'batch.node.centos'
|
|
|
|
else:
|
|
|
|
publisher = 'openlogic'
|
|
|
|
offer = 'centos-hpc'
|
2017-03-10 01:38:16 +03:00
|
|
|
if not inter_node_comm:
|
2016-11-13 09:13:55 +03:00
|
|
|
raise RuntimeError(
|
|
|
|
('cannot initialize an infiniband task on a '
|
|
|
|
'non-internode communication enabled '
|
2017-03-10 01:38:16 +03:00
|
|
|
'pool: {}').format(pool_id))
|
|
|
|
if not is_rdma_pool(vm_size):
|
2016-11-13 09:13:55 +03:00
|
|
|
raise RuntimeError(
|
|
|
|
('cannot initialize an infiniband task on nodes '
|
2017-09-27 18:41:17 +03:00
|
|
|
'without RDMA: pool={} vm_size={}').format(
|
2017-03-10 01:38:16 +03:00
|
|
|
pool_id, vm_size))
|
2019-06-27 23:08:11 +03:00
|
|
|
# mount /opt/intel for NetworkDirect RDMA
|
|
|
|
if is_networkdirect_rdma_pool(vm_size):
|
|
|
|
run_opts.append('{} /opt/intel:/opt/intel:ro'.format(bindparm))
|
2019-11-05 22:33:32 +03:00
|
|
|
# mutate run options
|
2017-09-26 21:08:37 +03:00
|
|
|
if not native:
|
2017-10-21 06:15:46 +03:00
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
run_opts.append('--net=host')
|
|
|
|
run_opts.append('--ulimit memlock=9223372036854775807')
|
|
|
|
run_opts.append('--device=/dev/infiniband/rdma_cm')
|
|
|
|
run_opts.append('--device=/dev/infiniband/uverbs0')
|
2019-11-05 22:33:32 +03:00
|
|
|
if is_sriov_rdma_pool(vm_size):
|
|
|
|
run_opts.append('--device=/dev/infiniband/issm0')
|
|
|
|
run_opts.append('--device=/dev/infiniband/ucm0')
|
|
|
|
run_opts.append('--device=/dev/infiniband/umad0')
|
|
|
|
run_opts.append('{} /etc/dat.conf:/etc/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
2017-10-21 06:15:46 +03:00
|
|
|
else:
|
|
|
|
# ensure singularity opts do not have network namespace
|
|
|
|
# or contain options
|
|
|
|
for opt in ('-c', '--contain', '-C', '--containall', '-n', '--net'):
    try:
        run_opts.remove(opt)
    except ValueError:
        pass
|
2019-06-27 23:08:11 +03:00
|
|
|
# add rdma dat files into container space
|
2018-06-11 23:00:17 +03:00
|
|
|
if (((publisher == 'openlogic' and offer == 'centos-hpc') or
|
|
|
|
(publisher == 'microsoft-azure-batch' and
|
|
|
|
offer == 'centos-container-rdma')) or
|
2017-12-09 03:28:25 +03:00
|
|
|
(is_custom_image and
|
|
|
|
node_agent.startswith('batch.node.centos'))):
|
2019-06-27 23:08:11 +03:00
|
|
|
if is_networkdirect_rdma_pool(vm_size):
|
|
|
|
run_opts.append('{} /etc/rdma:/etc/rdma:ro'.format(
|
|
|
|
bindparm))
|
|
|
|
run_opts.append(
|
|
|
|
'{} /etc/rdma/dat.conf:/etc/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
2018-06-11 23:00:17 +03:00
|
|
|
elif ((publisher == 'microsoft-azure-batch' and
|
|
|
|
offer == 'ubuntu-server-container-rdma') or
|
|
|
|
(is_custom_image and
|
|
|
|
node_agent.startswith('batch.node.ubuntu'))):
|
2019-06-27 23:08:11 +03:00
|
|
|
if is_networkdirect_rdma_pool(vm_size):
|
|
|
|
run_opts.append('{} /etc/dat.conf:/etc/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
|
|
|
run_opts.append(
|
|
|
|
'{} /etc/dat.conf:/etc/rdma/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
2017-12-09 03:28:25 +03:00
|
|
|
elif ((publisher == 'suse' and offer == 'sles-hpc') or
|
|
|
|
(is_custom_image and
|
|
|
|
node_agent.startswith('batch.node.opensuse'))):
|
2019-11-05 22:33:32 +03:00
|
|
|
if is_networkdirect_rdma_pool(vm_size):
|
|
|
|
run_opts.append('{} /etc/dat.conf:/etc/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
|
|
|
run_opts.append(
|
|
|
|
'{} /etc/dat.conf:/etc/rdma/dat.conf:ro'.format(
|
|
|
|
bindparm))
|
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
run_opts.append('--device=/dev/hvnd_rdma')
|
2017-09-26 21:08:37 +03:00
|
|
|
else:
|
|
|
|
raise ValueError(
|
|
|
|
('Unsupported infiniband VM config, publisher={} '
|
|
|
|
'offer={}').format(publisher, offer))
|
2016-11-13 09:13:55 +03:00
|
|
|
# always add option for envfile
|
2018-11-17 01:35:55 +03:00
|
|
|
envfile = '.shipyard.envlist'
|
|
|
|
if util.is_not_empty(docker_image) and not native:
|
|
|
|
run_opts.append('--env-file {}'.format(envfile))
|
2016-11-13 09:13:55 +03:00
|
|
|
# populate multi-instance settings
|
|
|
|
if is_multi_instance_task(conf):
|
2017-03-10 01:38:16 +03:00
|
|
|
if not inter_node_comm:
|
2016-11-13 09:13:55 +03:00
|
|
|
raise RuntimeError(
|
|
|
|
('cannot run a multi-instance task on a '
|
|
|
|
'non-internode communication enabled '
|
|
|
|
'pool: {}').format(pool_id))
|
2017-10-21 06:15:46 +03:00
|
|
|
# Docker container must be named
|
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
if util.is_none_or_empty(name):
|
|
|
|
raise ValueError(
|
|
|
|
'multi-instance task with a Docker image must be invoked '
|
|
|
|
'with a named container')
|
|
|
|
# application command cannot be empty/None
|
2016-11-13 09:13:55 +03:00
|
|
|
if util.is_none_or_empty(command):
|
|
|
|
raise ValueError(
|
|
|
|
'multi-instance task must have an application command')
|
2017-09-27 20:56:51 +03:00
|
|
|
# set docker run options for coordination command
|
2017-10-21 06:15:46 +03:00
|
|
|
if util.is_not_empty(docker_image):
|
|
|
|
if not native:
|
|
|
|
try:
|
|
|
|
run_opts.remove('--rm')
|
|
|
|
except ValueError:
|
|
|
|
pass
|
|
|
|
# run in detached mode
|
|
|
|
run_opts.append('-d')
|
|
|
|
# ensure host networking stack is used
|
|
|
|
if '--net=host' not in run_opts:
|
|
|
|
run_opts.append('--net=host')
|
|
|
|
else:
|
|
|
|
# ensure network namespace is not enabled
|
2017-09-27 20:56:51 +03:00
|
|
|
try:
|
2017-10-21 06:15:46 +03:00
|
|
|
run_opts.remove('-n')
|
|
|
|
except ValueError:
|
|
|
|
pass
|
|
|
|
try:
|
|
|
|
run_opts.remove('--net')
|
2017-09-27 20:56:51 +03:00
|
|
|
except ValueError:
|
|
|
|
pass
|
2016-11-13 09:13:55 +03:00
|
|
|
# get coordination command
|
|
|
|
try:
|
|
|
|
coordination_command = conf[
|
|
|
|
'multi_instance']['coordination_command']
|
|
|
|
if util.is_none_or_empty(coordination_command):
|
|
|
|
raise KeyError()
|
2017-09-19 23:37:21 +03:00
|
|
|
coordination_command = ' ' + coordination_command
|
2016-11-13 09:13:55 +03:00
|
|
|
except KeyError:
|
2017-09-27 20:56:51 +03:00
|
|
|
# manually set coordination command to ssh for native
|
|
|
|
# containers in daemon mode if not specified
|
|
|
|
if native:
|
|
|
|
coordination_command = '/usr/sbin/sshd -p 23'
|
|
|
|
else:
|
2017-11-08 23:23:23 +03:00
|
|
|
coordination_command = ''
|
2017-10-21 06:15:46 +03:00
|
|
|
if native or util.is_not_empty(singularity_image):
|
2017-09-27 20:56:51 +03:00
|
|
|
if util.is_not_empty(coordination_command):
|
|
|
|
cc_args = [coordination_command]
|
|
|
|
else:
|
|
|
|
cc_args = None
|
2017-09-19 23:37:21 +03:00
|
|
|
else:
|
2017-11-08 23:23:23 +03:00
|
|
|
cc_args = [
|
2019-06-12 22:23:08 +03:00
|
|
|
'docker run {} {}{}'.format(
|
2017-11-08 23:23:23 +03:00
|
|
|
' '.join(run_opts),
|
|
|
|
docker_image,
|
|
|
|
coordination_command),
|
|
|
|
]
|
2017-11-05 21:38:22 +03:00
|
|
|
del coordination_command
|
2016-11-13 09:13:55 +03:00
|
|
|
# get num instances
|
|
|
|
num_instances = conf['multi_instance']['num_instances']
|
|
|
|
if not isinstance(num_instances, int):
|
2018-06-25 17:49:45 +03:00
|
|
|
if util.is_not_empty(federation_id):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify a non-integral value "{}" for '
|
|
|
|
'num_instances for a multi-instance task destined for '
|
|
|
|
'a federation'.format(num_instances))
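# num_instances may be symbolic: pool_specification_vm_count_* values
# resolve against the pool specification, while pool_current_* values
# resolve against the live pool retrieved in cloud_pool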
|
2017-05-23 05:45:25 +03:00
|
|
|
# TODO remove deprecation path
|
|
|
|
if (num_instances == 'pool_specification_vm_count_dedicated' or
|
|
|
|
num_instances == 'pool_specification_vm_count'):
|
2017-05-13 00:40:06 +03:00
|
|
|
pool_vm_count = _pool_vm_count(config)
|
|
|
|
num_instances = pool_vm_count.dedicated
|
|
|
|
elif num_instances == 'pool_specification_vm_count_low_priority':
|
|
|
|
pool_vm_count = _pool_vm_count(config)
|
|
|
|
num_instances = pool_vm_count.low_priority
|
|
|
|
elif (num_instances == 'pool_current_dedicated' or
|
|
|
|
num_instances == 'pool_current_low_priority'):
|
2017-03-10 01:38:16 +03:00
|
|
|
if cloud_pool is None:
|
|
|
|
raise RuntimeError(
|
|
|
|
('Cannot retrieve current dedicated count for '
|
|
|
|
'pool: {}. Ensure pool exists.').format(pool_id))
|
2017-05-13 00:40:06 +03:00
|
|
|
if num_instances == 'pool_current_dedicated':
|
|
|
|
num_instances = cloud_pool.current_dedicated_nodes
|
|
|
|
elif num_instances == 'pool_current_low_priority':
|
|
|
|
num_instances = cloud_pool.current_low_priority_nodes
|
2016-11-13 09:13:55 +03:00
|
|
|
else:
|
|
|
|
raise ValueError(
|
|
|
|
('invalid multi_instance:num_instances '
|
|
|
|
'setting: {}').format(num_instances))
|
|
|
|
# get common resource files
|
|
|
|
try:
|
|
|
|
mi_rfs = conf['multi_instance']['resource_files']
|
|
|
|
if util.is_none_or_empty(mi_rfs):
|
|
|
|
raise KeyError()
|
|
|
|
mi_resource_files = []
|
|
|
|
for rf in mi_rfs:
|
|
|
|
try:
|
|
|
|
fm = rf['file_mode']
|
|
|
|
if util.is_none_or_empty(fm):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
fm = None
|
|
|
|
mi_resource_files.append(
|
|
|
|
ResourceFileSettings(
|
|
|
|
file_path=rf['file_path'],
|
|
|
|
blob_source=rf['blob_source'],
|
|
|
|
file_mode=fm,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
except KeyError:
|
|
|
|
mi_resource_files = None
|
2019-07-03 22:40:54 +03:00
|
|
|
pre_execution_command = _kv_read_checked(
|
|
|
|
conf['multi_instance'], 'pre_execution_command', None)
|
|
|
|
mpi = _kv_read(conf['multi_instance'], 'mpi', None)
|
|
|
|
if mpi is not None:
|
2019-07-18 04:57:06 +03:00
|
|
|
if is_windows:
|
|
|
|
raise ValueError(
|
|
|
|
'The mpi setting is not supported on windows pools')
|
2019-07-03 22:40:54 +03:00
|
|
|
mpi_runtime = _kv_read_checked(mpi, 'runtime', '').lower()
|
2019-07-18 04:57:06 +03:00
|
|
|
mpi_executable_path = _kv_read_checked(
|
|
|
|
mpi, 'executable_path', 'mpirun').lower()
|
2019-07-03 22:40:54 +03:00
|
|
|
mpi_options = _kv_read_checked(mpi, 'options', [])
|
|
|
|
mpi_ppn = _kv_read(mpi, 'processes_per_node', None)
|
2016-11-13 09:13:55 +03:00
|
|
|
else:
|
|
|
|
num_instances = 0
|
|
|
|
cc_args = None
|
|
|
|
mi_resource_files = None
|
2019-07-03 22:40:54 +03:00
|
|
|
pre_execution_command = None
|
|
|
|
mpi = None
|
2016-11-13 09:13:55 +03:00
|
|
|
return TaskSettings(
|
|
|
|
id=task_id,
|
2017-10-21 06:15:46 +03:00
|
|
|
docker_image=docker_image,
|
|
|
|
singularity_image=singularity_image,
|
2016-11-13 09:13:55 +03:00
|
|
|
name=name,
|
2017-10-21 06:15:46 +03:00
|
|
|
run_options=run_opts,
|
2017-10-25 06:07:00 +03:00
|
|
|
docker_exec_options=docker_exec_options,
|
2019-06-25 01:06:20 +03:00
|
|
|
working_dir=working_dir,
|
2016-11-13 09:13:55 +03:00
|
|
|
environment_variables=env_vars,
|
2017-01-25 00:54:26 +03:00
|
|
|
environment_variables_keyvault_secret_id=ev_secid,
|
2016-11-13 09:13:55 +03:00
|
|
|
envfile=envfile,
|
|
|
|
resource_files=resource_files,
|
2017-01-31 20:31:21 +03:00
|
|
|
max_task_retries=max_task_retries,
|
2017-05-23 19:29:00 +03:00
|
|
|
max_wall_time=max_wall_time,
|
2017-01-31 20:31:21 +03:00
|
|
|
retention_time=retention_time,
|
2016-11-13 09:13:55 +03:00
|
|
|
command=command,
|
|
|
|
infiniband=infiniband,
|
|
|
|
gpu=gpu,
|
|
|
|
depends_on=depends_on,
|
2017-01-12 20:23:25 +03:00
|
|
|
depends_on_range=depends_on_range,
|
2017-10-21 06:15:46 +03:00
|
|
|
singularity_cmd=singularity_cmd,
|
2017-10-22 23:59:00 +03:00
|
|
|
run_elevated=run_elevated,
|
2016-11-13 09:13:55 +03:00
|
|
|
multi_instance=MultiInstanceSettings(
|
|
|
|
num_instances=num_instances,
|
|
|
|
coordination_command=cc_args,
|
|
|
|
resource_files=mi_resource_files,
|
2019-07-03 22:40:54 +03:00
|
|
|
pre_execution_command=pre_execution_command,
|
|
|
|
mpi=None if mpi is None else MpiSettings(
|
|
|
|
runtime=mpi_runtime,
|
2019-07-18 04:57:06 +03:00
|
|
|
executable_path=mpi_executable_path,
|
2019-07-03 22:40:54 +03:00
|
|
|
options=mpi_options,
|
2019-07-18 04:57:06 +03:00
|
|
|
processes_per_node=mpi_ppn
|
2019-07-03 22:40:54 +03:00
|
|
|
),
|
2016-11-13 09:13:55 +03:00
|
|
|
),
|
2018-02-27 02:06:55 +03:00
|
|
|
default_exit_options=TaskExitOptions(
|
|
|
|
job_action=job_action,
|
|
|
|
dependency_action=dependency_action,
|
|
|
|
),
|
2016-11-13 09:13:55 +03:00
|
|
|
)
|
2017-03-03 21:28:10 +03:00
|
|
|
|
|
|
|
|
|
|
|
# REMOTEFS SETTINGS
|
2017-03-09 07:18:58 +03:00
|
|
|
def virtual_network_settings(
|
2017-03-11 02:10:31 +03:00
|
|
|
config, default_resource_group=None, default_existing_ok=False,
|
|
|
|
default_create_nonexistant=True):
|
2017-03-09 07:18:58 +03:00
|
|
|
# type: (dict, str, bool, bool) -> VirtualNetworkSettings
|
|
|
|
"""Get virtual network settings
|
|
|
|
:param dict config: configuration dict
|
2017-03-11 02:10:31 +03:00
|
|
|
:param str default_resource_group: default resource group
|
2017-03-09 07:18:58 +03:00
|
|
|
:param bool default_existing_ok: default existing ok
|
|
|
|
:param bool default_create_nonexistant: default create nonexistant
|
|
|
|
:rtype: VirtualNetworkSettings
|
|
|
|
:return: virtual network settings
|
|
|
|
"""
|
2017-03-11 02:10:31 +03:00
|
|
|
conf = _kv_read_checked(config, 'virtual_network', {})
|
2017-09-29 04:39:15 +03:00
|
|
|
arm_subnet_id = _kv_read_checked(conf, 'arm_subnet_id')
|
2017-03-09 07:18:58 +03:00
|
|
|
name = _kv_read_checked(conf, 'name')
|
2017-09-29 04:39:15 +03:00
|
|
|
if util.is_not_empty(arm_subnet_id) and util.is_not_empty(name):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify both arm_subnet_id and virtual_network.name')
|
2017-03-11 02:10:31 +03:00
|
|
|
resource_group = _kv_read_checked(
|
|
|
|
conf, 'resource_group', default_resource_group)
|
2017-03-09 07:18:58 +03:00
|
|
|
address_space = _kv_read_checked(conf, 'address_space')
|
|
|
|
existing_ok = _kv_read(conf, 'existing_ok', default_existing_ok)
|
|
|
|
create_nonexistant = _kv_read(
|
|
|
|
conf, 'create_nonexistant', default_create_nonexistant)
|
2017-03-11 02:10:31 +03:00
|
|
|
sub_conf = _kv_read_checked(conf, 'subnet', {})
|
|
|
|
subnet_name = _kv_read_checked(sub_conf, 'name')
|
2017-09-29 04:39:15 +03:00
|
|
|
if util.is_not_empty(name) and util.is_none_or_empty(subnet_name):
|
|
|
|
raise ValueError(
|
|
|
|
'subnet name not specified on virtual_network: {}'.format(name))
|
2017-03-11 02:10:31 +03:00
|
|
|
subnet_address_prefix = _kv_read_checked(sub_conf, 'address_prefix')
|
2017-03-09 07:18:58 +03:00
|
|
|
return VirtualNetworkSettings(
|
2017-09-29 04:39:15 +03:00
|
|
|
arm_subnet_id=arm_subnet_id,
|
2017-03-09 07:18:58 +03:00
|
|
|
name=name,
|
2017-03-10 01:38:16 +03:00
|
|
|
resource_group=resource_group,
|
2017-03-09 07:18:58 +03:00
|
|
|
address_space=address_space,
|
|
|
|
subnet_name=subnet_name,
|
|
|
|
subnet_address_prefix=subnet_address_prefix,
|
|
|
|
existing_ok=existing_ok,
|
|
|
|
create_nonexistant=create_nonexistant,
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-03-11 02:10:31 +03:00
|
|
|
def fileserver_settings(config, vm_count):
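# type: (dict, int) -> FileServerSettings
"""Get file server settings
:param dict config: storage cluster configuration dict
:param int vm_count: storage cluster vm count
:rtype: FileServerSettings
:return: file server settings
"""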
|
|
|
|
conf = _kv_read_checked(config, 'file_server', {})
|
|
|
|
sc_fs_type = _kv_read_checked(conf, 'type')
|
|
|
|
if util.is_none_or_empty(sc_fs_type):
|
2017-03-17 01:12:22 +03:00
|
|
|
raise ValueError('file_server:type must be specified')
|
2017-03-12 22:55:13 +03:00
|
|
|
sc_fs_type = sc_fs_type.lower()
|
2017-03-11 02:10:31 +03:00
|
|
|
# cross check against number of vms
|
|
|
|
if ((sc_fs_type == 'nfs' and vm_count != 1) or
|
|
|
|
(sc_fs_type == 'glusterfs' and vm_count <= 1)):
|
|
|
|
raise ValueError(
|
|
|
|
('invalid combination of file_server:type {} and '
|
|
|
|
'vm_count {}').format(sc_fs_type, vm_count))
|
|
|
|
sc_fs_mountpoint = _kv_read_checked(conf, 'mountpoint')
|
|
|
|
if util.is_none_or_empty(sc_fs_mountpoint):
|
2017-03-17 01:12:22 +03:00
|
|
|
raise ValueError('file_server:mountpoint must be specified')
|
2017-03-12 02:23:55 +03:00
|
|
|
sc_mo = _kv_read_checked(conf, 'mount_options')
|
2017-03-11 02:10:31 +03:00
|
|
|
# get server options
|
|
|
|
so_conf = _kv_read_checked(conf, 'server_options', {})
|
2017-03-30 23:18:47 +03:00
|
|
|
# get samba options
|
|
|
|
sc_samba = _kv_read_checked(conf, 'samba', {})
|
|
|
|
smb_share_name = _kv_read_checked(sc_samba, 'share_name')
|
|
|
|
sc_samba_account = _kv_read_checked(sc_samba, 'account', {})
|
|
|
|
smb_account = SambaAccountSettings(
|
|
|
|
username=_kv_read_checked(sc_samba_account, 'username', 'nobody'),
|
|
|
|
password=_kv_read_checked(sc_samba_account, 'password'),
|
|
|
|
uid=_kv_read(sc_samba_account, 'uid'),
|
|
|
|
gid=_kv_read(sc_samba_account, 'gid'),
|
|
|
|
)
|
|
|
|
if smb_account.username != 'nobody':
|
|
|
|
if util.is_none_or_empty(smb_account.password):
|
|
|
|
raise ValueError(
|
|
|
|
'samba account password is invalid for username {}'.format(
|
|
|
|
smb_account.username))
|
2017-04-15 08:39:27 +03:00
|
|
|
if '\n' in smb_account.password:
|
2017-04-15 00:56:35 +03:00
|
|
|
raise ValueError(
|
2017-04-15 08:39:27 +03:00
|
|
|
'samba account password contains invalid characters')
|
2017-03-30 23:18:47 +03:00
|
|
|
if smb_account.uid is None or smb_account.gid is None:
|
|
|
|
raise ValueError(
|
|
|
|
('samba account uid and/or gid is invalid for '
|
|
|
|
'username {}').format(smb_account.username))
|
|
|
|
smb_ro = _kv_read(sc_samba, 'read_only', False)
|
|
|
|
if smb_ro:
|
|
|
|
smb_ro = 'yes'
|
|
|
|
else:
|
|
|
|
smb_ro = 'no'
|
|
|
|
smb_cm = _kv_read_checked(sc_samba, 'create_mask', '0700')
|
|
|
|
smb_dm = _kv_read_checked(sc_samba, 'directory_mask', '0700')
|
2017-03-11 02:10:31 +03:00
|
|
|
return FileServerSettings(
|
|
|
|
type=sc_fs_type,
|
|
|
|
mountpoint=sc_fs_mountpoint,
|
2017-03-12 02:23:55 +03:00
|
|
|
mount_options=sc_mo,
|
2017-03-11 02:10:31 +03:00
|
|
|
server_options=so_conf,
|
2017-03-30 23:18:47 +03:00
|
|
|
samba=SambaSettings(
|
|
|
|
share_name=smb_share_name,
|
|
|
|
account=smb_account,
|
|
|
|
read_only=smb_ro,
|
|
|
|
create_mask=smb_cm,
|
|
|
|
directory_mask=smb_dm,
|
|
|
|
),
|
2017-03-11 02:10:31 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-03-17 01:12:22 +03:00
|
|
|
def remotefs_settings(config, sc_id=None):
|
|
|
|
# type: (dict, str) -> RemoteFsSettings
|
2017-03-03 21:28:10 +03:00
|
|
|
"""Get remote fs settings
|
|
|
|
:param dict config: configuration dict
|
2017-03-17 01:12:22 +03:00
|
|
|
:param str sc_id: storage cluster id
|
2017-03-03 21:28:10 +03:00
|
|
|
:rtype: RemoteFsSettings
|
|
|
|
:return: remote fs settings
|
|
|
|
"""
|
|
|
|
# general settings
|
2018-02-16 07:37:49 +03:00
|
|
|
try:
|
|
|
|
conf = config['remote_fs']
|
|
|
|
if util.is_none_or_empty(conf):
|
|
|
|
raise KeyError
|
|
|
|
except KeyError:
|
2018-02-17 00:56:46 +03:00
|
|
|
raise ValueError(
|
|
|
|
'remote_fs settings are invalid or missing. Did you specify an '
|
|
|
|
'fs configuration file?')
|
2017-03-11 02:10:31 +03:00
|
|
|
resource_group = _kv_read_checked(conf, 'resource_group')
|
2017-03-03 21:28:10 +03:00
|
|
|
location = conf['location']
|
|
|
|
if util.is_none_or_empty(location):
|
|
|
|
raise ValueError('invalid location in remote_fs')
|
2018-11-06 01:42:39 +03:00
|
|
|
zone = _kv_read(conf, 'zone')
|
2017-03-03 21:28:10 +03:00
|
|
|
# managed disk settings
|
2017-03-09 07:18:58 +03:00
|
|
|
md_conf = conf['managed_disks']
|
2017-03-11 02:10:31 +03:00
|
|
|
md_rg = _kv_read_checked(md_conf, 'resource_group', resource_group)
|
|
|
|
if util.is_none_or_empty(md_rg):
|
|
|
|
raise ValueError('invalid managed_disks:resource_group in remote_fs')
|
2018-11-06 01:42:39 +03:00
|
|
|
md_sku = _kv_read(md_conf, 'sku')
|
|
|
|
if util.is_none_or_empty(md_sku):
|
|
|
|
raise ValueError('invalid managed_disks:sku in remote_fs')
|
2017-03-09 07:18:58 +03:00
|
|
|
md_disk_size_gb = _kv_read(md_conf, 'disk_size_gb')
|
2018-11-06 01:42:39 +03:00
|
|
|
md_disk_pp = _kv_read(md_conf, 'disk_provisioned_performance', default={})
|
|
|
|
md_disk_pp_iops = _kv_read(md_disk_pp, 'iops_read_write')
|
|
|
|
md_disk_pp_mbps = _kv_read(md_disk_pp, 'mbps_read_write')
|
2017-03-12 22:55:13 +03:00
|
|
|
md_disk_names = _kv_read_checked(md_conf, 'disk_names')
|
2017-03-17 01:12:22 +03:00
|
|
|
md = ManagedDisksSettings(
|
2018-05-01 01:06:52 +03:00
|
|
|
location=location,
|
2017-03-17 01:12:22 +03:00
|
|
|
resource_group=md_rg,
|
2018-11-06 01:42:39 +03:00
|
|
|
zone=zone,
|
|
|
|
sku=md_sku,
|
2017-03-17 01:12:22 +03:00
|
|
|
disk_size_gb=md_disk_size_gb,
|
2018-11-06 01:42:39 +03:00
|
|
|
disk_provisioned_perf_iops_rw=md_disk_pp_iops,
|
|
|
|
disk_provisioned_perf_mbps_rw=md_disk_pp_mbps,
|
2017-03-17 01:12:22 +03:00
|
|
|
disk_names=md_disk_names,
|
|
|
|
)
|
2017-03-03 21:28:10 +03:00
|
|
|
if util.is_none_or_empty(sc_id):
|
2017-03-17 01:12:22 +03:00
|
|
|
return RemoteFsSettings(
|
|
|
|
managed_disks=md,
|
|
|
|
storage_cluster=None,
|
|
|
|
)
|
|
|
|
# storage cluster settings
|
2017-03-17 05:20:35 +03:00
|
|
|
try:
|
|
|
|
sc_conf = conf['storage_clusters'][sc_id]
|
|
|
|
except KeyError:
|
|
|
|
raise ValueError(
|
|
|
|
('Storage cluster {} is not defined in the given fs '
|
|
|
|
'configuration file').format(sc_id))
|
2017-03-11 02:10:31 +03:00
|
|
|
sc_rg = _kv_read_checked(sc_conf, 'resource_group', resource_group)
|
2018-05-01 01:06:52 +03:00
|
|
|
if util.is_none_or_empty(sc_rg):
|
2017-03-17 01:12:22 +03:00
|
|
|
raise ValueError('invalid resource_group in remote_fs')
|
2017-03-09 07:18:58 +03:00
|
|
|
sc_vm_count = _kv_read(sc_conf, 'vm_count', 1)
|
|
|
|
sc_vm_size = _kv_read_checked(sc_conf, 'vm_size')
|
2017-04-13 23:13:06 +03:00
|
|
|
sc_fault_domains = _kv_read(sc_conf, 'fault_domains', 2)
|
|
|
|
if sc_fault_domains < 2 or sc_fault_domains > 3:
|
|
|
|
raise ValueError('fault_domains must be in range [2, 3]: {}'.format(
|
|
|
|
sc_fault_domains))
|
2017-03-09 07:18:58 +03:00
|
|
|
sc_hostname_prefix = _kv_read_checked(sc_conf, 'hostname_prefix')
|
2018-01-26 00:37:29 +03:00
|
|
|
sc_accel_net = _kv_read(sc_conf, 'accelerated_networking', False)
|
2017-03-29 05:58:55 +03:00
|
|
|
# public ip settings
|
|
|
|
pip_conf = _kv_read_checked(sc_conf, 'public_ip', {})
|
|
|
|
sc_pip_enabled = _kv_read(pip_conf, 'enabled', True)
|
|
|
|
sc_pip_static = _kv_read(pip_conf, 'static', False)
|
2017-03-08 09:27:53 +03:00
|
|
|
# sc network security settings
|
2017-03-09 07:18:58 +03:00
|
|
|
ns_conf = sc_conf['network_security']
|
2017-03-08 09:27:53 +03:00
|
|
|
sc_ns_inbound = {
|
|
|
|
'ssh': InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='22',
|
2017-03-09 07:18:58 +03:00
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'ssh', ['*']),
|
2017-03-08 09:27:53 +03:00
|
|
|
protocol='tcp',
|
|
|
|
),
|
|
|
|
}
|
|
|
|
if not isinstance(sc_ns_inbound['ssh'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for ssh network security rule')
|
2017-03-09 07:18:58 +03:00
|
|
|
if 'nfs' in ns_conf:
|
2017-03-08 09:27:53 +03:00
|
|
|
sc_ns_inbound['nfs'] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='2049',
|
2017-03-15 08:07:51 +03:00
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'nfs'),
|
2017-03-08 09:27:53 +03:00
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
if not isinstance(sc_ns_inbound['nfs'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for nfs network security rule')
|
2017-03-15 08:07:51 +03:00
|
|
|
if 'glusterfs' in ns_conf:
|
|
|
|
# glusterd and management ports
|
|
|
|
sc_ns_inbound['glusterfs-management'] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='24007-24008',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'glusterfs'),
|
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
# gluster brick ports: only 1 port per vm is needed as there will
|
|
|
|
# only be 1 brick per vm (brick is spread across RAID)
|
|
|
|
sc_ns_inbound['glusterfs-bricks'] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='49152',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'glusterfs'),
|
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
# only need to check one for glusterfs
|
|
|
|
if not isinstance(
|
|
|
|
sc_ns_inbound['glusterfs-management'].source_address_prefix,
|
|
|
|
list):
|
|
|
|
raise ValueError(
|
|
|
|
'expected list for glusterfs network security rule')
|
2017-03-31 05:48:17 +03:00
|
|
|
if 'smb' in ns_conf:
|
|
|
|
sc_ns_inbound['smb'] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='445',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'smb'),
|
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
if not isinstance(sc_ns_inbound['smb'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for smb network security rule')
|
2017-03-10 01:38:16 +03:00
|
|
|
if 'custom_inbound_rules' in ns_conf:
|
2017-03-15 08:07:51 +03:00
|
|
|
# reserved keywords (current and possible future support)
|
2017-03-31 05:48:17 +03:00
|
|
|
_reserved = frozenset([
|
2017-04-13 19:31:35 +03:00
|
|
|
'ssh', 'nfs', 'glusterfs', 'smb', 'cifs', 'samba', 'zfs',
|
|
|
|
'beegfs', 'cephfs',
|
2017-03-31 05:48:17 +03:00
|
|
|
])
|
2017-03-10 01:38:16 +03:00
|
|
|
for key in ns_conf['custom_inbound_rules']:
|
2017-03-08 09:27:53 +03:00
|
|
|
# ensure key is not reserved
|
|
|
|
if key.lower() in _reserved:
|
|
|
|
raise ValueError(
|
|
|
|
('custom inbound rule of name {} conflicts with a '
|
|
|
|
'reserved name {}').format(key, _reserved))
|
|
|
|
sc_ns_inbound[key] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range=_kv_read_checked(
|
2017-03-10 01:38:16 +03:00
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'destination_port_range'),
|
2017-03-08 09:27:53 +03:00
|
|
|
source_address_prefix=_kv_read_checked(
|
2017-03-10 01:38:16 +03:00
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'source_address_prefix'),
|
2017-03-08 09:27:53 +03:00
|
|
|
protocol=_kv_read_checked(
|
2017-03-10 01:38:16 +03:00
|
|
|
ns_conf['custom_inbound_rules'][key], 'protocol'),
|
2017-03-08 09:27:53 +03:00
|
|
|
)
|
|
|
|
if not isinstance(sc_ns_inbound[key].source_address_prefix, list):
|
|
|
|
raise ValueError(
|
|
|
|
'expected list for network security rule {} '
|
|
|
|
'source_address_prefix'.format(key))
|
2017-03-07 01:56:13 +03:00
|
|
|
# sc file server settings
|
2017-03-11 02:10:31 +03:00
|
|
|
file_server = fileserver_settings(sc_conf, sc_vm_count)
|
2017-03-07 01:56:13 +03:00
|
|
|
# sc ssh settings
|
2017-03-09 07:18:58 +03:00
|
|
|
ssh_conf = sc_conf['ssh']
|
|
|
|
sc_ssh_username = _kv_read_checked(ssh_conf, 'username')
|
|
|
|
sc_ssh_public_key = _kv_read_checked(ssh_conf, 'ssh_public_key')
|
2017-04-13 23:13:06 +03:00
|
|
|
if util.is_not_empty(sc_ssh_public_key):
|
|
|
|
sc_ssh_public_key = pathlib.Path(sc_ssh_public_key)
|
2017-04-13 19:31:35 +03:00
|
|
|
sc_ssh_public_key_data = _kv_read_checked(ssh_conf, 'ssh_public_key_data')
|
|
|
|
sc_ssh_private_key = _kv_read_checked(ssh_conf, 'ssh_private_key')
|
2017-04-13 23:13:06 +03:00
|
|
|
if util.is_not_empty(sc_ssh_private_key):
|
|
|
|
sc_ssh_private_key = pathlib.Path(sc_ssh_private_key)
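# exactly one public key source (file or inline data) may be supplied;
# a private key may only be given when a public key is also specified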
|
|
|
|
if (sc_ssh_public_key is not None and
|
2017-04-13 19:31:35 +03:00
|
|
|
util.is_not_empty(sc_ssh_public_key_data)):
|
|
|
|
raise ValueError('cannot specify both an SSH public key file and data')
|
2017-04-13 23:13:06 +03:00
|
|
|
if (sc_ssh_public_key is None and
|
2017-04-13 19:31:35 +03:00
|
|
|
util.is_none_or_empty(sc_ssh_public_key_data) and
|
2017-04-13 23:13:06 +03:00
|
|
|
sc_ssh_private_key is not None):
|
2017-04-13 19:31:35 +03:00
|
|
|
raise ValueError(
|
|
|
|
'cannot specify an SSH private key with no public key specified')
|
2017-03-03 21:28:10 +03:00
|
|
|
sc_ssh_gen_file_path = _kv_read_checked(
|
2017-03-09 07:18:58 +03:00
|
|
|
ssh_conf, 'generated_file_export_path', '.')
|
2017-04-13 19:31:35 +03:00
|
|
|
# ensure ssh username and samba username are not the same
|
|
|
|
if file_server.samba.account.username == sc_ssh_username:
|
|
|
|
raise ValueError(
|
|
|
|
'SSH username and samba account username cannot be the same')
|
2017-03-07 01:56:13 +03:00
|
|
|
# sc vm disk map settings
|
2017-03-09 07:18:58 +03:00
|
|
|
vmd_conf = sc_conf['vm_disk_map']
|
2017-03-12 22:55:13 +03:00
|
|
|
_disk_set = frozenset(md_disk_names)
|
2017-03-03 21:28:10 +03:00
|
|
|
disk_map = {}
|
2017-03-09 07:18:58 +03:00
|
|
|
for vmkey in vmd_conf:
|
2017-03-03 21:28:10 +03:00
|
|
|
# ensure all disks in disk array are specified in managed disks
|
2017-03-09 07:18:58 +03:00
|
|
|
disk_array = vmd_conf[vmkey]['disk_array']
|
2017-03-03 21:28:10 +03:00
|
|
|
if not _disk_set.issuperset(set(disk_array)):
|
|
|
|
raise ValueError(
|
|
|
|
('Not all disks {} for vm {} are specified in '
|
2017-03-12 22:55:13 +03:00
|
|
|
'managed_disks:disk_names ({})').format(
|
2017-03-03 21:28:10 +03:00
|
|
|
disk_array, vmkey, _disk_set))
|
2018-06-27 18:49:32 +03:00
|
|
|
raid_level = _kv_read(vmd_conf[vmkey], 'raid_level', default=-1)
|
2019-10-10 21:03:59 +03:00
|
|
|
if len(disk_array) == 1:
|
|
|
|
if raid_level != -1:
|
|
|
|
raise ValueError(
|
|
|
|
'Cannot specify a RAID-level with 1 disk in array')
|
2017-03-03 21:28:10 +03:00
|
|
|
else:
|
2017-03-08 06:51:01 +03:00
|
|
|
if raid_level == 0 and len(disk_array) < 2:
|
2017-03-06 11:35:24 +03:00
|
|
|
raise ValueError('RAID-0 arrays require at least two disks')
|
2017-03-08 06:51:01 +03:00
|
|
|
if raid_level != 0:
|
|
|
|
raise ValueError('Unsupported RAID level {}'.format(
|
|
|
|
raid_level))
|
2017-03-03 21:28:10 +03:00
|
|
|
disk_map[int(vmkey)] = MappedVmDiskSettings(
|
|
|
|
disk_array=disk_array,
|
2017-03-09 07:18:58 +03:00
|
|
|
filesystem=vmd_conf[vmkey]['filesystem'],
|
2017-03-08 06:51:01 +03:00
|
|
|
raid_level=raid_level,
|
2017-03-03 21:28:10 +03:00
|
|
|
)
|
2017-03-07 04:53:57 +03:00
|
|
|
# check disk map against vm_count
|
|
|
|
if len(disk_map) != sc_vm_count:
|
|
|
|
raise ValueError(
|
2017-03-17 01:12:22 +03:00
|
|
|
('Number of entries in vm_disk_map {} inconsistent with '
|
|
|
|
'vm_count {}').format(len(disk_map), sc_vm_count))
|
2017-03-03 21:28:10 +03:00
|
|
|
return RemoteFsSettings(
|
2018-11-06 01:42:39 +03:00
|
|
|
managed_disks=md,
|
2017-03-03 21:28:10 +03:00
|
|
|
storage_cluster=StorageClusterSettings(
|
|
|
|
id=sc_id,
|
2018-05-01 01:06:52 +03:00
|
|
|
location=location,
|
2017-03-11 02:10:31 +03:00
|
|
|
resource_group=sc_rg,
|
2018-11-06 01:42:39 +03:00
|
|
|
zone=zone,
|
2017-03-09 07:18:58 +03:00
|
|
|
virtual_network=virtual_network_settings(
|
|
|
|
sc_conf,
|
2017-03-11 09:54:16 +03:00
|
|
|
default_resource_group=sc_rg,
|
2017-03-09 07:18:58 +03:00
|
|
|
default_existing_ok=False,
|
|
|
|
default_create_nonexistant=True,
|
2017-03-05 11:52:37 +03:00
|
|
|
),
|
|
|
|
network_security=NetworkSecuritySettings(
|
2017-03-08 09:27:53 +03:00
|
|
|
inbound=sc_ns_inbound,
|
2017-03-03 21:28:10 +03:00
|
|
|
),
|
2017-03-11 02:10:31 +03:00
|
|
|
file_server=file_server,
|
2017-03-03 21:28:10 +03:00
|
|
|
vm_count=sc_vm_count,
|
|
|
|
vm_size=sc_vm_size,
|
2017-04-13 23:13:06 +03:00
|
|
|
fault_domains=sc_fault_domains,
|
2018-01-26 00:37:29 +03:00
|
|
|
accelerated_networking=sc_accel_net,
|
2017-03-29 05:58:55 +03:00
|
|
|
public_ip=PublicIpSettings(
|
|
|
|
enabled=sc_pip_enabled,
|
|
|
|
static=sc_pip_static,
|
|
|
|
),
|
2017-03-03 21:28:10 +03:00
|
|
|
hostname_prefix=sc_hostname_prefix,
|
|
|
|
ssh=SSHSettings(
|
|
|
|
username=sc_ssh_username,
|
|
|
|
expiry_days=9999,
|
|
|
|
ssh_public_key=sc_ssh_public_key,
|
2017-04-13 19:31:35 +03:00
|
|
|
ssh_public_key_data=sc_ssh_public_key_data,
|
|
|
|
ssh_private_key=sc_ssh_private_key,
|
2017-03-03 21:28:10 +03:00
|
|
|
generate_docker_tunnel_script=False,
|
|
|
|
generated_file_export_path=sc_ssh_gen_file_path,
|
|
|
|
hpn_server_swap=False,
|
2018-06-12 00:27:26 +03:00
|
|
|
allow_docker_access=False,
|
2017-03-03 21:28:10 +03:00
|
|
|
),
|
|
|
|
vm_disk_map=disk_map,
|
2018-06-05 22:00:22 +03:00
|
|
|
prometheus=prometheus_settings(sc_conf),
|
2017-03-03 21:28:10 +03:00
|
|
|
),
|
|
|
|
)
|
2017-03-12 21:14:11 +03:00
|
|
|
|
|
|
|
|
2018-05-30 01:02:32 +03:00
|
|
|
def monitoring_prometheus_settings(config):
|
|
|
|
# type: (dict) -> PrometheusMonitoringSettings
|
|
|
|
"""Get prometheus monitoring settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: PrometheusMonitoringSettings
|
|
|
|
:return: Prometheus monitoring settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf = config['monitoring']['services']['prometheus']
|
|
|
|
except KeyError:
|
|
|
|
conf = {}
|
|
|
|
port = None
|
|
|
|
else:
|
2018-06-08 23:17:40 +03:00
|
|
|
port = str(_kv_read(conf, 'port'))
|
2018-05-30 01:02:32 +03:00
|
|
|
return PrometheusMonitoringSettings(
|
|
|
|
port=port,
|
|
|
|
scrape_interval=_kv_read_checked(
|
|
|
|
conf, 'scrape_interval', default='10s'),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2018-06-08 23:17:40 +03:00
|
|
|
def credentials_grafana_admin_password_secret_id(config):
|
|
|
|
# type: (dict) -> str
|
|
|
|
"""Get Grafana admin password KeyVault Secret Id
|
|
|
|
:param dict config: configuration object
|
|
|
|
:rtype: str
|
|
|
|
:return: keyvault secret id
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
secid = config[
|
|
|
|
'credentials']['monitoring']['grafana']['admin'][
|
|
|
|
'password_keyvault_secret_id']
|
|
|
|
if util.is_none_or_empty(secid):
|
|
|
|
raise KeyError()
|
|
|
|
except KeyError:
|
|
|
|
return None
|
|
|
|
return secid
|
|
|
|
|
|
|
|
|
|
|
|
def set_credentials_grafana_admin_password(config, pw):
|
|
|
|
# type: (dict, str) -> None
|
|
|
|
"""Set Grafana admin password
|
|
|
|
:param dict config: configuration object
|
|
|
|
:param str pw: password
|
|
|
|
"""
|
|
|
|
config['credentials']['monitoring']['grafana']['admin']['password'] = pw
|
|
|
|
|
|
|
|
|
2018-05-30 01:02:32 +03:00
|
|
|
def monitoring_grafana_settings(config):
|
|
|
|
# type: (dict) -> GrafanaMonitoringSettings
|
|
|
|
"""Get grafana monitoring settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: GrafanaMonitoringSettings
|
|
|
|
:return: Grafana monitoring settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf = config['monitoring']['services']['grafana']
|
|
|
|
except KeyError:
|
|
|
|
conf = {}
|
2018-06-08 23:17:40 +03:00
|
|
|
try:
|
|
|
|
gaconf = config['credentials']['monitoring']['grafana']
|
|
|
|
except KeyError:
|
|
|
|
gaconf = {}
|
|
|
|
admin = _kv_read_checked(gaconf, 'admin', default={})
|
|
|
|
admin_user = _kv_read_checked(admin, 'username')
|
|
|
|
if util.is_none_or_empty(admin_user):
|
|
|
|
raise ValueError('Grafana admin user is invalid')
|
|
|
|
admin_password = _kv_read_checked(admin, 'password')
|
|
|
|
if util.is_none_or_empty(admin_password):
|
|
|
|
raise ValueError('Grafana admin password is invalid')
|
2018-05-30 01:02:32 +03:00
|
|
|
return GrafanaMonitoringSettings(
|
2018-06-08 23:17:40 +03:00
|
|
|
admin_user=admin_user,
|
|
|
|
admin_password=admin_password,
|
2018-06-05 22:00:22 +03:00
|
|
|
additional_dashboards=_kv_read_checked(conf, 'additional_dashboards'),
|
2018-05-30 01:02:32 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
def monitoring_services_settings(config):
|
|
|
|
# type: (dict) -> MonitoringServicesSettings
|
|
|
|
"""Get services monitoring settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: MonitoringServicesSettings
|
|
|
|
:return: Services monitoring settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf = config['monitoring']['services']
|
|
|
|
except KeyError:
|
|
|
|
conf = {}
|
|
|
|
le = _kv_read_checked(conf, 'lets_encrypt', default={})
|
|
|
|
return MonitoringServicesSettings(
|
|
|
|
resource_polling_interval=str(_kv_read(
|
|
|
|
conf, 'resource_polling_interval', 15)),
|
|
|
|
lets_encrypt_enabled=_kv_read(le, 'enabled', default=True),
|
|
|
|
lets_encrypt_staging=_kv_read(
|
2018-06-05 22:00:22 +03:00
|
|
|
le, 'use_staging_environment', default=True),
|
2018-05-30 01:02:32 +03:00
|
|
|
prometheus=monitoring_prometheus_settings(config),
|
|
|
|
grafana=monitoring_grafana_settings(config),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2018-05-01 01:06:52 +03:00
|
|
|
def monitoring_settings(config):
|
|
|
|
# type: (dict) -> VmResource
|
|
|
|
"""Get monitoring settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: VmResource
|
|
|
|
:return: VM resource settings
|
|
|
|
"""
|
|
|
|
# general settings
|
|
|
|
try:
|
2018-05-30 01:02:32 +03:00
|
|
|
conf = config['monitoring']
|
2018-05-01 01:06:52 +03:00
|
|
|
if util.is_none_or_empty(conf):
|
|
|
|
raise KeyError
|
|
|
|
except KeyError:
|
2018-06-25 17:49:45 +03:00
|
|
|
raise ValueError('monitoring settings are invalid')
|
2018-05-01 01:06:52 +03:00
|
|
|
location = conf['location']
|
|
|
|
if util.is_none_or_empty(location):
|
2018-06-25 17:49:45 +03:00
|
|
|
raise ValueError('invalid location in monitoring')
|
2018-05-01 01:06:52 +03:00
|
|
|
# monitoring vm settings
|
|
|
|
rg = _kv_read_checked(conf, 'resource_group')
|
|
|
|
if util.is_none_or_empty(rg):
|
2018-06-25 17:49:45 +03:00
|
|
|
raise ValueError('invalid resource_group in monitoring')
|
2018-05-01 01:06:52 +03:00
|
|
|
vm_size = _kv_read_checked(conf, 'vm_size')
|
|
|
|
hostname_prefix = _kv_read_checked(conf, 'hostname_prefix')
|
|
|
|
accel_net = _kv_read(conf, 'accelerated_networking', False)
|
|
|
|
# public ip settings
|
|
|
|
pip_conf = _kv_read_checked(conf, 'public_ip', {})
|
|
|
|
pip_enabled = _kv_read(pip_conf, 'enabled', True)
|
|
|
|
pip_static = _kv_read(pip_conf, 'static', False)
|
|
|
|
# sc network security settings
|
|
|
|
ns_conf = conf['network_security']
|
|
|
|
ns_inbound = {
|
|
|
|
'ssh': InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='22',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'ssh', ['*']),
|
|
|
|
protocol='tcp',
|
|
|
|
),
|
|
|
|
}
|
|
|
|
if not isinstance(ns_inbound['ssh'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for ssh network security rule')
|
2018-05-30 01:02:32 +03:00
|
|
|
if 'grafana' in ns_conf:
|
|
|
|
ns_inbound['grafana'] = InboundNetworkSecurityRule(
|
|
|
|
# grafana is reverse proxied through nginx on the HTTPS port
|
|
|
|
destination_port_range='443' if pip_enabled else '3000',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'grafana'),
|
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
if not isinstance(ns_inbound['grafana'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for grafana network security rule')
|
2018-06-05 22:00:22 +03:00
|
|
|
promconf = monitoring_prometheus_settings(config)
|
|
|
|
if promconf.port is not None:
|
|
|
|
if 'prometheus' not in ns_conf:
|
|
|
|
raise ValueError(
|
|
|
|
'prometheus port specified, but no network security '
|
|
|
|
'rule exists')
|
|
|
|
else:
|
2018-05-30 01:02:32 +03:00
|
|
|
ns_inbound['prometheus'] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range=promconf.port,
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'prometheus'),
|
|
|
|
protocol='tcp',
|
|
|
|
)
|
|
|
|
if not isinstance(
|
|
|
|
ns_inbound['prometheus'].source_address_prefix, list):
|
|
|
|
raise ValueError(
|
|
|
|
'expected list for prometheus network security rule')
|
2018-05-01 01:06:52 +03:00
|
|
|
if 'custom_inbound_rules' in ns_conf:
|
|
|
|
for key in ns_conf['custom_inbound_rules']:
|
2018-06-25 17:49:45 +03:00
|
|
|
ns_inbound[key] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'destination_port_range'),
|
|
|
|
source_address_prefix=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'source_address_prefix'),
|
|
|
|
protocol=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key], 'protocol'),
|
|
|
|
)
|
|
|
|
if not isinstance(ns_inbound[key].source_address_prefix, list):
|
2018-05-01 01:06:52 +03:00
|
|
|
raise ValueError(
|
2018-06-25 17:49:45 +03:00
|
|
|
'expected list for network security rule {} '
|
|
|
|
'source_address_prefix'.format(key))
|
|
|
|
# ssh settings
|
|
|
|
ssh_conf = conf['ssh']
|
|
|
|
ssh_username = _kv_read_checked(ssh_conf, 'username')
|
|
|
|
ssh_public_key = _kv_read_checked(ssh_conf, 'ssh_public_key')
|
|
|
|
if util.is_not_empty(ssh_public_key):
|
|
|
|
ssh_public_key = pathlib.Path(ssh_public_key)
|
|
|
|
ssh_public_key_data = _kv_read_checked(ssh_conf, 'ssh_public_key_data')
|
|
|
|
ssh_private_key = _kv_read_checked(ssh_conf, 'ssh_private_key')
|
|
|
|
if util.is_not_empty(ssh_private_key):
|
|
|
|
ssh_private_key = pathlib.Path(ssh_private_key)
|
|
|
|
if (ssh_public_key is not None and
|
|
|
|
util.is_not_empty(ssh_public_key_data)):
|
|
|
|
raise ValueError('cannot specify both an SSH public key file and data')
|
|
|
|
if (ssh_public_key is None and
|
|
|
|
util.is_none_or_empty(ssh_public_key_data) and
|
|
|
|
ssh_private_key is not None):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify an SSH private key with no public key specified')
|
|
|
|
ssh_gen_file_path = _kv_read_checked(
|
|
|
|
ssh_conf, 'generated_file_export_path', '.')
|
|
|
|
return VmResource(
|
|
|
|
location=location,
|
|
|
|
resource_group=rg,
|
2018-11-06 01:42:39 +03:00
|
|
|
zone=_kv_read(conf, 'zone'),
|
2018-06-25 17:49:45 +03:00
|
|
|
hostname_prefix=hostname_prefix,
|
|
|
|
virtual_network=virtual_network_settings(
|
|
|
|
conf,
|
|
|
|
default_resource_group=rg,
|
|
|
|
default_existing_ok=False,
|
|
|
|
default_create_nonexistant=True,
|
|
|
|
),
|
|
|
|
network_security=NetworkSecuritySettings(
|
|
|
|
inbound=ns_inbound,
|
|
|
|
),
|
|
|
|
vm_size=vm_size,
|
|
|
|
accelerated_networking=accel_net,
|
|
|
|
public_ip=PublicIpSettings(
|
|
|
|
enabled=pip_enabled,
|
|
|
|
static=pip_static,
|
|
|
|
),
|
|
|
|
ssh=SSHSettings(
|
|
|
|
username=ssh_username,
|
|
|
|
expiry_days=9999,
|
|
|
|
ssh_public_key=ssh_public_key,
|
|
|
|
ssh_public_key_data=ssh_public_key_data,
|
|
|
|
ssh_private_key=ssh_private_key,
|
|
|
|
generate_docker_tunnel_script=False,
|
|
|
|
generated_file_export_path=ssh_gen_file_path,
|
|
|
|
hpn_server_swap=False,
|
|
|
|
allow_docker_access=False,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
def federation_proxy_options_settings(config):
|
|
|
|
# type: (dict) -> FederationProxyOptionsSettings
|
|
|
|
"""Get federation proxy options settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: FederationProxyOptionsSettings
|
|
|
|
:return: federation proxy options settings
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
conf = config['federation']['proxy_options']
|
|
|
|
except KeyError:
|
|
|
|
conf = {}
|
|
|
|
pi_conf = _kv_read_checked(conf, 'polling_interval', default={})
|
|
|
|
fpi = _kv_read(pi_conf, 'federations', 30)
|
|
|
|
if fpi < 5:
|
|
|
|
raise ValueError(
|
|
|
|
'the polling_interval:federations value can not be less than 5')
|
|
|
|
api = _kv_read(pi_conf, 'actions', 5)
|
|
|
|
if api < 5:
|
|
|
|
raise ValueError(
|
|
|
|
'the polling_interval:actions value can not be less than 5')
|
|
|
|
log_conf = _kv_read_checked(conf, 'logging', default={})
|
|
|
|
sched_conf = _kv_read_checked(conf, 'scheduling', default={})
|
|
|
|
as_conf = _kv_read_checked(sched_conf, 'after_success', default={})
|
|
|
|
sasbi = _kv_read(as_conf, 'blackout_interval', 15)
|
|
|
|
if sasbi < 2:
|
|
|
|
raise ValueError(
|
|
|
|
'the scheduling:after_success:blackout_interval value can not '
|
|
|
|
'be less than 2')
|
|
|
|
return FederationProxyOptionsSettings(
|
|
|
|
federations_polling_interval=str(fpi),
|
|
|
|
actions_polling_interval=str(api),
|
|
|
|
log_persistence=_kv_read(log_conf, 'persistence', True),
|
|
|
|
log_level=_kv_read_checked(log_conf, 'level', 'debug'),
|
|
|
|
log_filename=_kv_read_checked(log_conf, 'filename', 'fedproxy.log'),
|
|
|
|
scheduling_after_success_blackout_interval=sasbi,
|
|
|
|
scheduling_after_success_evaluate_autoscale=_kv_read(
|
|
|
|
as_conf, 'evaluate_autoscale', True),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
def federation_settings(config):
|
|
|
|
# type: (dict) -> VmResource
|
|
|
|
"""Get federation settings
|
|
|
|
:param dict config: configuration dict
|
|
|
|
:rtype: VmResource
|
|
|
|
:return: VM resource settings
|
|
|
|
"""
|
|
|
|
# general settings
|
|
|
|
try:
|
|
|
|
conf = config['federation']
|
|
|
|
if util.is_none_or_empty(conf):
|
|
|
|
raise KeyError
|
|
|
|
except KeyError:
|
|
|
|
raise ValueError('federation settings are invalid or missing')
|
|
|
|
location = conf['location']
|
|
|
|
if util.is_none_or_empty(location):
|
|
|
|
raise ValueError('invalid location in federation')
|
|
|
|
# vm settings
|
|
|
|
rg = _kv_read_checked(conf, 'resource_group')
|
|
|
|
if util.is_none_or_empty(rg):
|
|
|
|
raise ValueError('invalid resource_group in federation')
|
|
|
|
vm_size = _kv_read_checked(conf, 'vm_size')
|
|
|
|
hostname_prefix = _kv_read_checked(conf, 'hostname_prefix')
|
|
|
|
accel_net = _kv_read(conf, 'accelerated_networking', False)
|
|
|
|
# public ip settings
|
|
|
|
pip_conf = _kv_read_checked(conf, 'public_ip', {})
|
|
|
|
pip_enabled = _kv_read(pip_conf, 'enabled', True)
|
|
|
|
pip_static = _kv_read(pip_conf, 'static', False)
|
|
|
|
# sc network security settings
|
|
|
|
ns_conf = conf['network_security']
|
|
|
|
ns_inbound = {
|
|
|
|
'ssh': InboundNetworkSecurityRule(
|
|
|
|
destination_port_range='22',
|
|
|
|
source_address_prefix=_kv_read_checked(ns_conf, 'ssh', ['*']),
|
|
|
|
protocol='tcp',
|
|
|
|
),
|
|
|
|
}
|
|
|
|
if not isinstance(ns_inbound['ssh'].source_address_prefix, list):
|
|
|
|
raise ValueError('expected list for ssh network security rule')
|
|
|
|
if 'custom_inbound_rules' in ns_conf:
|
|
|
|
for key in ns_conf['custom_inbound_rules']:
|
2018-05-01 01:06:52 +03:00
|
|
|
ns_inbound[key] = InboundNetworkSecurityRule(
|
|
|
|
destination_port_range=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'destination_port_range'),
|
|
|
|
source_address_prefix=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key],
|
|
|
|
'source_address_prefix'),
|
|
|
|
protocol=_kv_read_checked(
|
|
|
|
ns_conf['custom_inbound_rules'][key], 'protocol'),
|
|
|
|
)
|
|
|
|
if not isinstance(ns_inbound[key].source_address_prefix, list):
|
|
|
|
raise ValueError(
|
|
|
|
'expected list for network security rule {} '
|
|
|
|
'source_address_prefix'.format(key))
|
|
|
|
# ssh settings
|
|
|
|
ssh_conf = conf['ssh']
|
|
|
|
ssh_username = _kv_read_checked(ssh_conf, 'username')
|
|
|
|
ssh_public_key = _kv_read_checked(ssh_conf, 'ssh_public_key')
|
|
|
|
if util.is_not_empty(ssh_public_key):
|
|
|
|
ssh_public_key = pathlib.Path(ssh_public_key)
|
|
|
|
ssh_public_key_data = _kv_read_checked(ssh_conf, 'ssh_public_key_data')
|
|
|
|
ssh_private_key = _kv_read_checked(ssh_conf, 'ssh_private_key')
|
|
|
|
if util.is_not_empty(ssh_private_key):
|
|
|
|
ssh_private_key = pathlib.Path(ssh_private_key)
|
|
|
|
if (ssh_public_key is not None and
|
|
|
|
util.is_not_empty(ssh_public_key_data)):
|
|
|
|
raise ValueError('cannot specify both an SSH public key file and data')
|
|
|
|
if (ssh_public_key is None and
|
|
|
|
util.is_none_or_empty(ssh_public_key_data) and
|
|
|
|
ssh_private_key is not None):
|
|
|
|
raise ValueError(
|
|
|
|
'cannot specify an SSH private key with no public key specified')
|
|
|
|
ssh_gen_file_path = _kv_read_checked(
|
|
|
|
ssh_conf, 'generated_file_export_path', '.')
|
|
|
|
return VmResource(
|
|
|
|
location=location,
|
|
|
|
resource_group=rg,
|
2018-11-06 01:42:39 +03:00
|
|
|
zone=_kv_read(conf, 'zone'),
|
2018-05-01 01:06:52 +03:00
|
|
|
hostname_prefix=hostname_prefix,
|
|
|
|
virtual_network=virtual_network_settings(
|
|
|
|
conf,
|
|
|
|
default_resource_group=rg,
|
|
|
|
default_existing_ok=False,
|
|
|
|
default_create_nonexistant=True,
|
|
|
|
),
|
|
|
|
network_security=NetworkSecuritySettings(
|
|
|
|
inbound=ns_inbound,
|
|
|
|
),
|
|
|
|
vm_size=vm_size,
|
|
|
|
accelerated_networking=accel_net,
|
|
|
|
public_ip=PublicIpSettings(
|
|
|
|
enabled=pip_enabled,
|
|
|
|
static=pip_static,
|
|
|
|
),
|
|
|
|
ssh=SSHSettings(
|
|
|
|
username=ssh_username,
|
|
|
|
expiry_days=9999,
|
|
|
|
ssh_public_key=ssh_public_key,
|
|
|
|
ssh_public_key_data=ssh_public_key_data,
|
|
|
|
ssh_private_key=ssh_private_key,
|
|
|
|
generate_docker_tunnel_script=False,
|
|
|
|
generated_file_export_path=ssh_gen_file_path,
|
|
|
|
hpn_server_swap=False,
|
2018-06-12 00:27:26 +03:00
|
|
|
allow_docker_access=False,
|
2018-05-01 01:06:52 +03:00
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2019-01-15 20:56:03 +03:00
|
|
|
def slurm_options_settings(config):
    # type: (dict) -> SlurmOptionsSettings
    """Get slurm options settings
    :param dict config: configuration dict
    :rtype: SlurmOptionsSettings
    :return: slurm options settings
    """
    try:
        conf = config['slurm']['slurm_options']
    except KeyError:
        conf = {}
    cluster_id = config['slurm']['cluster_id']
    if util.is_none_or_empty(cluster_id) or len(cluster_id) > 22:
        raise ValueError(
            'cluster_id is invalid. Must be between 1 and 22 '
            'characters in length')
    bc = credentials_batch(config)
    idle_reclaim_time = _kv_read(
        conf, 'idle_reclaim_time', default='00:15:00')
    idle_reclaim_time = util.convert_string_to_timedelta(idle_reclaim_time)
    if idle_reclaim_time.total_seconds() == 0:
        raise ValueError('idle_reclaim_time must be positive')
    max_nodes = 0
    partitions = {}
    part_conf = _kv_read_checked(conf, 'elastic_partitions')
    for key in part_conf:
        part = _kv_read_checked(part_conf, key)
        batch_pools = {}
        pool_conf = _kv_read_checked(part, 'batch_pools', default={})
        for pkey in pool_conf:
            bpool = _kv_read_checked(pool_conf, pkey)
            batch_service_url = _kv_read_checked(bpool, 'account_service_url')
            if util.is_none_or_empty(batch_service_url):
                batch_service_url = bc.account_service_url
            max_compute_nodes = _kv_read(bpool, 'max_compute_nodes')
            reclaim_exclude_num_nodes = _kv_read(
                bpool, 'reclaim_exclude_num_nodes', default=0)
            if reclaim_exclude_num_nodes > max_compute_nodes:
                raise ValueError(
                    'reclaim_exclude_num_nodes {} > '
                    'max_compute_nodes {}'.format(
                        reclaim_exclude_num_nodes, max_compute_nodes))
            batch_pools[pkey] = SlurmBatchPoolSettings(
                batch_service_url=batch_service_url,
                compute_node_type=_kv_read_checked(bpool, 'compute_node_type'),
                max_compute_nodes=max_compute_nodes,
                weight=_kv_read(bpool, 'weight'),
                features=_kv_read_checked(bpool, 'features', default=[]),
                reclaim_exclude_num_nodes=reclaim_exclude_num_nodes,
            )
            max_nodes = max(max_nodes, batch_pools[pkey].max_compute_nodes)
        max_runtime_limit = _kv_read_checked(part, 'max_runtime_limit')
        if util.is_not_empty(max_runtime_limit):
            max_runtime_limit = max_runtime_limit.replace('.', '-')
        else:
            max_runtime_limit = 'UNLIMITED'
        partition = SlurmPartitionSettings(
            batch_pools=batch_pools,
            max_runtime_limit=max_runtime_limit,
            default=_kv_read(part, 'default'),
            preempt_type=_kv_read_checked(part, 'preempt_type'),
            preempt_mode=_kv_read_checked(part, 'preempt_mode'),
            over_subscribe=_kv_read_checked(part, 'over_subscribe'),
            priority_tier=_kv_read(part, 'priority_tier'),
            other_options=_kv_read_checked(part, 'other_options', default=[]),
        )
        partitions[key] = partition
    unmanaged_partitions = []
    upart_conf = _kv_read_checked(conf, 'unmanaged_partitions', default=[])
    for upart in upart_conf:
        unmanaged_partitions.append(SlurmUnmanagedPartitionSettings(
            partition=_kv_read_checked(upart, 'partition'),
            nodes=_kv_read_checked(upart, 'nodes'),
        ))
    return SlurmOptionsSettings(
        cluster_id=cluster_id,
        idle_reclaim_time=idle_reclaim_time,
        max_nodes=max_nodes,
        elastic_partitions=partitions,
        unmanaged_partitions=unmanaged_partitions,
    )


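# Illustrative sketch only: the 'slurm' configuration shape consumed by
# slurm_options_settings() above. Keys and nesting are inferred from the
# _kv_read* calls in that function; the partition name 'mypart', the pool
# name 'pool0', and all values are hypothetical placeholders, not an
# authoritative schema.
#
#   config = {
#       'slurm': {
#           'cluster_id': 'myslurm',  # required, 1-22 characters
#           'slurm_options': {
#               'idle_reclaim_time': '00:15:00',
#               'elastic_partitions': {
#                   'mypart': {
#                       'batch_pools': {
#                           'pool0': {
#                               # falls back to credentials_batch() URL
#                               'account_service_url': None,
#                               'compute_node_type': '<node type>',
#                               'max_compute_nodes': 32,
#                               'weight': 0,
#                               'features': [],
#                               'reclaim_exclude_num_nodes': 0,
#                           },
#                       },
#                       'max_runtime_limit': None,  # None -> 'UNLIMITED'
#                       'default': True,
#                   },
#               },
#               'unmanaged_partitions': [],
#           },
#       },
#   }

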
def slurm_settings(config, kind):
    # type: (dict, str) -> VmResource
    """Get slurm settings
    :param dict config: configuration dict
    :param str kind: slurm resource kind
    :rtype: VmResource
    :return: VM resource settings
    """
    # general settings
    try:
        conf = config['slurm']
        if util.is_none_or_empty(conf):
            raise KeyError
    except KeyError:
        raise ValueError('slurm settings are invalid or missing')
    location = conf['location']
    if util.is_none_or_empty(location):
        raise ValueError('invalid location in slurm')
    rg = _kv_read_checked(conf, 'resource_group')
    if util.is_none_or_empty(rg):
        raise ValueError('invalid resource_group in slurm')
    zone = _kv_read(conf, 'zone')
    hostname_prefix = '{}-{}'.format(
        _kv_read_checked(conf, 'cluster_id'),
        # Azure doesn't like "login" for DNS
        'gateway' if kind == 'login' else kind
    )
    # get kind-specific settings
    try:
        conf = conf[kind]
        if util.is_none_or_empty(conf):
            raise KeyError
    except KeyError:
        raise ValueError(
            'slurm:{} settings are invalid or missing'.format(kind))
    # vm settings
    vm_size = _kv_read_checked(conf, 'vm_size')
    accel_net = _kv_read(conf, 'accelerated_networking', False)
    # public ip settings
    pip_conf = _kv_read_checked(conf, 'public_ip', {})
    pip_enabled = _kv_read(pip_conf, 'enabled', True)
    pip_static = _kv_read(pip_conf, 'static', False)
    # network security settings
    ns_conf = conf['network_security']
    ns_inbound = {
        'ssh': InboundNetworkSecurityRule(
            destination_port_range='22',
            source_address_prefix=_kv_read_checked(ns_conf, 'ssh', ['*']),
            protocol='tcp',
        ),
    }
    if not isinstance(ns_inbound['ssh'].source_address_prefix, list):
        raise ValueError('expected list for ssh network security rule')
    if 'custom_inbound_rules' in ns_conf:
        for key in ns_conf['custom_inbound_rules']:
            ns_inbound[key] = InboundNetworkSecurityRule(
                destination_port_range=_kv_read_checked(
                    ns_conf['custom_inbound_rules'][key],
                    'destination_port_range'),
                source_address_prefix=_kv_read_checked(
                    ns_conf['custom_inbound_rules'][key],
                    'source_address_prefix'),
                protocol=_kv_read_checked(
                    ns_conf['custom_inbound_rules'][key], 'protocol'),
            )
            if not isinstance(ns_inbound[key].source_address_prefix, list):
                raise ValueError(
                    'expected list for network security rule {} '
                    'source_address_prefix'.format(key))
    # ssh settings
    ssh_conf = conf['ssh']
    ssh_username = _kv_read_checked(ssh_conf, 'username')
    ssh_public_key = _kv_read_checked(ssh_conf, 'ssh_public_key')
    if util.is_not_empty(ssh_public_key):
        ssh_public_key = pathlib.Path(ssh_public_key)
    ssh_public_key_data = _kv_read_checked(ssh_conf, 'ssh_public_key_data')
    ssh_private_key = _kv_read_checked(ssh_conf, 'ssh_private_key')
    if util.is_not_empty(ssh_private_key):
        ssh_private_key = pathlib.Path(ssh_private_key)
    if (ssh_public_key is not None and
            util.is_not_empty(ssh_public_key_data)):
        raise ValueError(
            'cannot specify both an SSH public key file and data')
    if (ssh_public_key is None and
            util.is_none_or_empty(ssh_public_key_data) and
            ssh_private_key is not None):
        raise ValueError(
            'cannot specify an SSH private key with no public key specified')
    ssh_gen_file_path = _kv_read_checked(
        ssh_conf, 'generated_file_export_path', '.')
    return VmResource(
        location=location,
        resource_group=rg,
        zone=zone,
        hostname_prefix=hostname_prefix,
        virtual_network=virtual_network_settings(
            conf,
            default_resource_group=rg,
            default_existing_ok=False,
            default_create_nonexistant=True,
        ),
        network_security=NetworkSecuritySettings(
            inbound=ns_inbound,
        ),
        vm_size=vm_size,
        accelerated_networking=accel_net,
        public_ip=PublicIpSettings(
            enabled=pip_enabled,
            static=pip_static,
        ),
        ssh=SSHSettings(
            username=ssh_username,
            expiry_days=9999,
            ssh_public_key=ssh_public_key,
            ssh_public_key_data=ssh_public_key_data,
            ssh_private_key=ssh_private_key,
            generate_docker_tunnel_script=False,
            generated_file_export_path=ssh_gen_file_path,
            hpn_server_swap=False,
            allow_docker_access=False,
        ),
    )


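# Illustrative sketch only: the per-kind block read by slurm_settings()
# above, where <kind> is the slurm resource kind passed in (for example
# 'controller' or 'login'; 'login' uses a 'gateway' hostname prefix). Keys
# are inferred from the reads in that function and values are placeholders,
# not an authoritative schema. Note that virtual_network_settings() reads its
# own keys from the same per-kind block (not shown here).
#
#   config = {
#       'slurm': {
#           'cluster_id': 'myslurm',
#           'location': '<azure region>',
#           'resource_group': '<resource group>',
#           'zone': None,
#           '<kind>': {
#               'vm_size': '<vm size>',
#               'accelerated_networking': False,
#               'public_ip': {'enabled': True, 'static': False},
#               'network_security': {
#                   'ssh': ['*'],  # list of source address prefixes
#               },
#               'ssh': {
#                   'username': '<user>',
#                   'ssh_public_key': None,
#                   'ssh_public_key_data': None,
#                   'ssh_private_key': None,
#                   'generated_file_export_path': '.',
#               },
#           },
#       },
#   }

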
def slurm_vm_count(config, kind):
    # type: (dict, str) -> int
    """Get Slurm vm count for the given kind
    :param dict config: configuration dict
    :param str kind: kind
    :rtype: int
    :return: vm count
    """
    conf = _kv_read_checked(_kv_read_checked(config, 'slurm'), kind)
    return _kv_read(conf, 'vm_count')


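# Illustrative example (values and the 'controller' kind are hypothetical):
#   config = {'slurm': {'controller': {'vm_count': 2}}}
#   slurm_vm_count(config, 'controller')  # -> 2

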
def slurm_additional_prep_script(config, kind):
    # type: (dict, str) -> str
    """Get Slurm additional prep script
    :param dict config: configuration dict
    :param str kind: kind
    :rtype: str
    :return: prep script location
    """
    conf = _kv_read_checked(_kv_read_checked(config, 'slurm'), kind)
    return _kv_read(conf, 'additional_prep_script')


def slurm_shared_data_volumes(config):
    # type: (dict) -> List[SlurmSharedDataVolumesSettings]
    """Get Slurm shared data volumes
    :param dict config: configuration dict
    :rtype: List[SlurmSharedDataVolumesSettings]
    :return: list of slurm shared data volume settings
    """
    conf = _kv_read_checked(config, 'slurm')
    sdv = _kv_read_checked(conf, 'shared_data_volumes', default={})
    vols = []
    state = False
    for sdkey in sdv:
        host_mount_path = _kv_read_checked(sdv[sdkey], 'host_mount_path')
        if host_mount_path == '/home' or host_mount_path == '/home/':
            raise ValueError(
                '/home host_mount_path for {} is currently not '
                'supported'.format(sdkey))
        store_slurmctld_state = _kv_read(sdv[sdkey], 'store_slurmctld_state')
        if store_slurmctld_state:
            if state:
                raise ValueError(
                    'only one shared data volume should be designated as '
                    'store_slurmctld_state')
            state = True
        vols.append(SlurmSharedDataVolumesSettings(
            id=sdkey,
            host_mount_path=host_mount_path,
            store_slurmctld_state=store_slurmctld_state,
        ))
    return vols


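# Illustrative sketch only: the 'shared_data_volumes' block consumed by
# slurm_shared_data_volumes() above. The volume id 'nfs_server' is a
# hypothetical placeholder; keys are inferred from the reads in that function.
#
#   config = {
#       'slurm': {
#           'shared_data_volumes': {
#               'nfs_server': {
#                   'host_mount_path': '/shared',  # '/home' is rejected
#                   # at most one volume may set this (enforced above)
#                   'store_slurmctld_state': True,
#               },
#           },
#       },
#   }

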
def other_storage_account_settings(config, key):
    # type: (dict, str) -> str
    """Get other storage account settings selector
    :param dict config: configuration dict
    :param str key: config key
    :rtype: str
    :return: other storage settings link
    """
    try:
        conf = config[key]
        if util.is_none_or_empty(conf):
            raise KeyError
    except KeyError:
        raise ValueError('{} settings are invalid or missing'.format(key))
    ssel = _kv_read_checked(conf, 'storage_account_settings')
    if util.is_none_or_empty(ssel):
        raise ValueError(
            '{} storage_account_settings are invalid or missing'.format(key))
    return ssel


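# Illustrative example (the link name 'mystorage' is hypothetical): given
#   config = {'slurm': {'storage_account_settings': 'mystorage', ...}}
# other_storage_account_settings(config, 'slurm') returns 'mystorage', which
# the wrappers below pass to credentials_storage() to resolve the actual
# storage credential settings.

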
def federation_credentials_storage(config):
    # type: (dict) -> StorageCredentialsSettings
    """Get federation storage account settings
    :param dict config: configuration dict
    :rtype: StorageCredentialsSettings
    :return: federation storage cred settings
    """
    return credentials_storage(
        config, other_storage_account_settings(config, 'federation'))


def slurm_credentials_storage(config):
    # type: (dict) -> StorageCredentialsSettings
    """Get slurm storage account settings
    :param dict config: configuration dict
    :rtype: StorageCredentialsSettings
    :return: slurm storage cred settings
    """
    return credentials_storage(
        config, other_storage_account_settings(config, 'slurm'))


def generate_availability_set_name(vr):
    # type: (VmResource) -> str
    """Generate an availability set name
    :param VmResource vr: vm resource
    :rtype: str
    :return: availability set name
    """
    return '{}-as'.format(vr.hostname_prefix)


def generate_virtual_machine_name(vr, i):
    # type: (VmResource, int) -> str
    """Generate a virtual machine name
    :param VmResource vr: vm resource
    :param int i: resource number
    :rtype: str
    :return: vm name
    """
    return '{}-vm{}'.format(vr.hostname_prefix, str(i).zfill(3))


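# Illustrative example: for a VmResource with hostname_prefix
# 'myslurm-controller', generate_virtual_machine_name(vr, 2) yields
# 'myslurm-controller-vm002'.

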
def get_offset_from_virtual_machine_name(vm_name):
    # type: (str) -> int
    """Gets the virtual machine offset given a vm name
    :param str vm_name: vm name
    :rtype: int
    :return: vm offset
    """
    return int(vm_name.split('-vm')[-1])


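# Illustrative example: get_offset_from_virtual_machine_name('myslurm-vm002')
# returns 2, inverting the zero-padded suffix appended by
# generate_virtual_machine_name() above.

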
def generate_virtual_machine_extension_name(vr, i):
    # type: (VmResource, int) -> str
    """Generate a virtual machine extension name
    :param VmResource vr: vm resource
    :param int i: resource number
    :rtype: str
    :return: vm extension name
    """
    return '{}-vmext{}'.format(vr.hostname_prefix, str(i).zfill(3))


def generate_virtual_machine_msi_extension_name(vr, i):
    # type: (VmResource, int) -> str
    """Generate a virtual machine msi extension name
    :param VmResource vr: vm resource
    :param int i: resource number
    :rtype: str
    :return: vm extension msi name
    """
    return '{}-vmextmsi{}'.format(vr.hostname_prefix, str(i).zfill(3))


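# Illustrative example: with hostname_prefix 'myslurm-controller',
# generate_virtual_machine_extension_name(vr, 1) yields
# 'myslurm-controller-vmext001' and
# generate_virtual_machine_msi_extension_name(vr, 1) yields
# 'myslurm-controller-vmextmsi001'.

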
def generate_network_security_group_name(vr):
    # type: (VmResource) -> str
    """Generate a network security group name
    :param VmResource vr: vm resource
    :rtype: str
    :return: nsg name
    """
    return '{}-nsg'.format(vr.hostname_prefix)


def generate_network_security_inbound_rule_name(rule_name, i):
    # type: (str, int) -> str
    """Generate a network security inbound rule name
    :param str rule_name: rule name
    :param int i: rule number
    :rtype: str
    :return: inbound rule name
    """
    return '{}_in-{}'.format(rule_name, str(i).zfill(3))


def generate_network_security_inbound_rule_description(rule_name, i):
    # type: (str, int) -> str
    """Generate a network security inbound rule description
    :param str rule_name: rule name
    :param int i: rule number
    :rtype: str
    :return: inbound description
    """
    return '{} inbound ({})'.format(rule_name, str(i).zfill(3))


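# Illustrative example: generate_network_security_inbound_rule_name('ssh', 0)
# yields 'ssh_in-000' and
# generate_network_security_inbound_rule_description('ssh', 0) yields
# 'ssh inbound (000)'.

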
def generate_public_ip_name(vr, i):
    # type: (VmResource, int) -> str
    """Generate a public ip name
    :param VmResource vr: vm resource
    :param int i: pip number
    :rtype: str
    :return: public ip name
    """
    return '{}-pip{}'.format(vr.hostname_prefix, str(i).zfill(3))


def generate_hostname(vr, i):
    # type: (VmResource, int) -> str
    """Generate a hostname (dns label prefix)
    :param VmResource vr: vm resource
    :param int i: hostname number
    :rtype: str
    :return: hostname
    """
    return '{}{}'.format(vr.hostname_prefix, str(i).zfill(3))


def generate_network_interface_name(vr, i):
    # type: (VmResource, int) -> str
    """Generate a network interface name
    :param VmResource vr: vm resource
    :param int i: network interface number
    :rtype: str
    :return: network interface name
    """
    return '{}-ni{}'.format(vr.hostname_prefix, str(i).zfill(3))


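# Illustrative example: with hostname_prefix 'myslurm-controller',
# generate_public_ip_name(vr, 0) yields 'myslurm-controller-pip000',
# generate_hostname(vr, 0) yields 'myslurm-controller000' (the DNS label
# prefix), and generate_network_interface_name(vr, 0) yields
# 'myslurm-controller-ni000'.

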
def get_file_server_glusterfs_volume_name(sc):
    # type: (StorageClusterSettings) -> str
    """Get the glusterfs volume name
    :param StorageClusterSettings sc: storage cluster settings
    :rtype: str
    :return: glusterfs volume name
    """
    try:
        volname = sc.file_server.server_options['glusterfs']['volume_name']
    except KeyError:
        volname = get_gluster_default_volume_name()
    return volname


def get_file_server_glusterfs_volume_type(sc):
    # type: (StorageClusterSettings) -> str
    """Get the glusterfs volume type
    :param StorageClusterSettings sc: storage cluster settings
    :rtype: str
    :return: glusterfs volume type
    """
    try:
        voltype = sc.file_server.server_options[
            'glusterfs']['volume_type'].lower()
    except KeyError:
        voltype = 'distributed'
    return voltype


def get_file_server_glusterfs_transport(sc):
    # type: (StorageClusterSettings) -> str
    """Get the glusterfs transport
    :param StorageClusterSettings sc: storage cluster settings
    :rtype: str
    :return: glusterfs transport
    """
    try:
        transport = sc.file_server.server_options[
            'glusterfs']['transport'].lower()
        if transport != 'tcp':
            raise ValueError('Only tcp is supported as transport')
    except KeyError:
        transport = 'tcp'
    return transport
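

# Illustrative sketch only: the server_options shape consumed by the three
# glusterfs accessors above. Keys are inferred from the lookups in those
# functions; values shown are either the fallbacks applied above or
# placeholders.
#
#   sc.file_server.server_options = {
#       'glusterfs': {
#           # omitted -> get_gluster_default_volume_name()
#           'volume_name': '<volume name>',
#           'volume_type': 'distributed',  # default when omitted
#           'transport': 'tcp',            # only 'tcp' is accepted
#       },
#   }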