#!/usr/bin/env python3

# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

# stdlib imports
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import logging.handlers
try:
    import pathlib
except ImportError:
    import pathlib2 as pathlib
import subprocess
import time
try:
    import urllib.request as urllibreq
except ImportError:
    import urllib as urllibreq
import uuid
# non-stdlib imports
import azure.batch.batch_auth as batchauth
import azure.batch.batch_service_client as batch
import azure.batch.models as batchmodels
# local imports
import convoy.batch
import convoy.crypto
import convoy.data
import convoy.storage
import convoy.util

# create logger
logger = logging.getLogger('shipyard')
# global defines
_VERSION = '2.0.0'
_ROOT_PATH = pathlib.Path(__file__).resolve().parent
_AZUREFILE_DVD_BIN = {
    'url': (
        'https://github.com/Azure/azurefile-dockervolumedriver/releases'
        '/download/v0.5.1/azurefile-dockervolumedriver'
    ),
    'md5': 'ee14da21efdfda4bedd85a67adbadc14'
}
_NVIDIA_DOCKER = {
    'ubuntuserver': {
        'url': (
            'https://github.com/NVIDIA/nvidia-docker/releases'
            '/download/v1.0.0-rc.3/nvidia-docker_1.0.0.rc.3-1_amd64.deb'
        ),
        'md5': '49990712ebf3778013fae81ee67f6c79'
    }
}
_NVIDIA_DRIVER = 'nvidia-driver.run'
_NODEPREP_FILE = (
    'shipyard_nodeprep.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_nodeprep.sh'))
)
_GLUSTERPREP_FILE = (
    'shipyard_glusterfs.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_glusterfs.sh'))
)
_GLUSTERRESIZE_FILE = (
    'shipyard_glusterfs_resize.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_glusterfs_resize.sh'))
)
_HPNSSH_FILE = (
    'shipyard_hpnssh.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_hpnssh.sh'))
)
_JOBPREP_FILE = (
    'docker_jp_block.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/docker_jp_block.sh'))
)
_BLOBXFER_FILE = (
    'shipyard_blobxfer.sh',
    str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_blobxfer.sh'))
)
_CASCADE_FILE = (
    'cascade.py',
    str(pathlib.Path(_ROOT_PATH, 'cascade/cascade.py'))
)
_SETUP_PR_FILE = (
    'setup_private_registry.py',
    str(pathlib.Path(_ROOT_PATH, 'cascade/setup_private_registry.py'))
)
_PERF_FILE = (
    'perf.py',
    str(pathlib.Path(_ROOT_PATH, 'cascade/perf.py'))
)
_VM_TCP_NO_TUNE = (
    'basic_a0', 'basic_a1', 'basic_a2', 'basic_a3', 'basic_a4', 'standard_a0',
    'standard_a1', 'standard_d1', 'standard_d2', 'standard_d1_v2',
    'standard_f1'
)
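# Note: each of the _*_FILE constants above is a (blob name, local file path)
# tuple; for example, _NODEPREP_FILE resolves to something like
# ('shipyard_nodeprep.sh', '<repo root>/scripts/shipyard_nodeprep.sh').
# These tuples are uploaded as pool resource files via
# convoy.storage.upload_resource_files and referenced by their blob name.

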
def _populate_global_settings(config, action):
    # type: (dict, str) -> None
    """Populate global settings from config
    :param dict config: configuration dict
    :param str action: action
    """
    ssel = config['batch_shipyard']['storage_account_settings']
    try:
        sep = config['batch_shipyard']['storage_entity_prefix']
    except KeyError:
        sep = None
    if sep is None or len(sep) == 0:
        raise ValueError('storage_entity_prefix is invalid')
    postfix = '-'.join(
        (config['credentials']['batch']['account'].lower(),
         config['pool_specification']['id'].lower()))
    sa = config['credentials']['storage'][ssel]['account']
    sakey = config['credentials']['storage'][ssel]['account_key']
    try:
        saep = config['credentials']['storage'][ssel]['endpoint']
    except KeyError:
        saep = 'core.windows.net'
    convoy.storage.set_storage_configuration(
        sep, postfix, sa, sakey, saep)
    if action != 'addpool':
        return
    try:
        dpre = config['docker_registry']['private']['enabled']
    except KeyError:
        dpre = False
    # set docker private registry file info
    if dpre:
        rf = None
        imgid = None
        try:
            rf = config['docker_registry']['private'][
                'docker_save_registry_file']
            imgid = config['docker_registry']['private'][
                'docker_save_registry_image_id']
            if rf is not None and len(rf) == 0:
                rf = None
            if imgid is not None and len(imgid) == 0:
                imgid = None
            if rf is None or imgid is None:
                raise KeyError()
        except KeyError:
            if rf is None:
                rf = str(pathlib.Path(
                    _ROOT_PATH, 'resources/docker-registry-v2.tar.gz'))
            imgid = None
        prf = pathlib.Path(rf)
        # attempt to package if registry file doesn't exist
        if not prf.exists() or prf.stat().st_size == 0 or imgid is None:
            logger.debug(
                'attempting to generate docker private registry tarball')
            try:
                imgid = convoy.util.decode_string(subprocess.check_output(
                    'sudo docker images -q registry:2',
                    shell=True)).strip()
            except subprocess.CalledProcessError:
                rf = None
                imgid = None
            else:
                if len(imgid) == 12:
                    if rf is None:
                        rf = str(pathlib.Path(
                            _ROOT_PATH,
                            'resources/docker-registry-v2.tar.gz'))
                    prf = pathlib.Path(rf)
                    subprocess.check_call(
                        'sudo docker save registry:2 '
                        '| gzip -c > {}'.format(rf), shell=True)
        regfile = (prf.name if rf is not None else None, rf, imgid)
    else:
        regfile = (None, None, None)
    logger.info('private registry settings: {}'.format(regfile))
    convoy.storage.set_registry_file(regfile)


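# Illustrative sketch (not the full schema) of the configuration keys read by
# _populate_global_settings above; actual values come from the user-supplied
# credentials/config/pool json files:
#
#   {
#     "batch_shipyard": {
#       "storage_account_settings": "mystorageaccount",
#       "storage_entity_prefix": "shipyard"
#     },
#     "credentials": {
#       "batch": {"account": "mybatchaccount"},
#       "storage": {
#         "mystorageaccount": {
#           "account": "...", "account_key": "...",
#           "endpoint": "core.windows.net"
#         }
#       }
#     },
#     "pool_specification": {"id": "mypool"}
#   }

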
def _create_credentials(config):
    # type: (dict) -> tuple
    """Create authenticated clients
    :param dict config: configuration dict
    :rtype: tuple
    :return: (batch client, blob client, queue client, table client)
    """
    credentials = batchauth.SharedKeyCredentials(
        config['credentials']['batch']['account'],
        config['credentials']['batch']['account_key'])
    batch_client = batch.BatchServiceClient(
        credentials,
        base_url=config['credentials']['batch']['account_service_url'])
    batch_client.config.add_user_agent('batch-shipyard/{}'.format(_VERSION))
    blob_client, queue_client, table_client = convoy.storage.create_clients()
    return batch_client, blob_client, queue_client, table_client


def setup_nvidia_docker_package(blob_client, config):
    # type: (azure.storage.blob.BlockBlobService, dict) -> pathlib.Path
    """Set up the nvidia docker package
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param dict config: configuration dict
    :rtype: pathlib.Path
    :return: package path
    """
    offer = config['pool_specification']['offer'].lower()
    if offer == 'ubuntuserver':
        pkg = pathlib.Path(_ROOT_PATH, 'resources/nvidia-docker.deb')
    else:
        raise ValueError('Offer {} is unsupported with nvidia docker'.format(
            offer))
    # check to see if package is downloaded
    if (not pkg.exists() or
            convoy.util.compute_md5_for_file(pkg, False) !=
            _NVIDIA_DOCKER[offer]['md5']):
        response = urllibreq.urlopen(_NVIDIA_DOCKER[offer]['url'])
        with pkg.open('wb') as f:
            f.write(response.read())
        # check md5
        if (convoy.util.compute_md5_for_file(pkg, False) !=
                _NVIDIA_DOCKER[offer]['md5']):
            raise RuntimeError('md5 mismatch for {}'.format(pkg))
    return pkg


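# Note: the package path returned above is consumed by add_pool() below, which
# builds a gpu_env string of the form
# '<is NV-series>:<nvidia driver file>:<nvidia-docker package name>', e.g.
# 'False:nvidia-driver.run:nvidia-docker.deb' (illustrative values).

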
def setup_azurefile_volume_driver(blob_client, config):
    # type: (azure.storage.blob.BlockBlobService, dict) -> tuple
    """Set up the Azure File docker volume driver
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param dict config: configuration dict
    :rtype: tuple
    :return: (bin path, service file path, service env file path,
        volume creation script path)
    """
    publisher = config['pool_specification']['publisher'].lower()
    offer = config['pool_specification']['offer'].lower()
    sku = config['pool_specification']['sku'].lower()
    # check to see if binary is downloaded
    bin = pathlib.Path(_ROOT_PATH, 'resources/azurefile-dockervolumedriver')
    if (not bin.exists() or
            convoy.util.compute_md5_for_file(bin, False) !=
            _AZUREFILE_DVD_BIN['md5']):
        response = urllibreq.urlopen(_AZUREFILE_DVD_BIN['url'])
        with bin.open('wb') as f:
            f.write(response.read())
        # check md5
        if (convoy.util.compute_md5_for_file(bin, False) !=
                _AZUREFILE_DVD_BIN['md5']):
            raise RuntimeError('md5 mismatch for {}'.format(bin))
    if (publisher == 'canonical' and offer == 'ubuntuserver' and
            sku.startswith('14.04')):
        srv = pathlib.Path(
            _ROOT_PATH, 'resources/azurefile-dockervolumedriver.conf')
    else:
        srv = pathlib.Path(
            _ROOT_PATH, 'resources/azurefile-dockervolumedriver.service')
    # construct systemd env file
    sa = None
    sakey = None
    saep = None
    for svkey in config[
            'global_resources']['docker_volumes']['shared_data_volumes']:
        conf = config[
            'global_resources']['docker_volumes']['shared_data_volumes'][svkey]
        if conf['volume_driver'] == 'azurefile':
            # check every entry to ensure the same storage account
            ssel = conf['storage_account_settings']
            _sa = config['credentials']['storage'][ssel]['account']
            if sa is not None and sa != _sa:
                raise ValueError(
                    'multiple storage accounts are not supported for '
                    'azurefile docker volume driver')
            sa = _sa
            sakey = config['credentials']['storage'][ssel]['account_key']
            saep = config['credentials']['storage'][ssel]['endpoint']
        elif conf['volume_driver'] != 'glusterfs':
            raise NotImplementedError(
                'Unsupported volume driver: {}'.format(conf['volume_driver']))
    if sa is None or sakey is None:
        raise RuntimeError(
            'storage account or storage account key not specified for '
            'azurefile docker volume driver')
    srvenv = pathlib.Path(
        _ROOT_PATH, 'resources/azurefile-dockervolumedriver.env')
    with srvenv.open('wb') as f:
        f.write('AZURE_STORAGE_ACCOUNT={}\n'.format(sa).encode('utf8'))
        f.write('AZURE_STORAGE_ACCOUNT_KEY={}\n'.format(sakey).encode('utf8'))
        f.write('AZURE_STORAGE_BASE={}\n'.format(saep).encode('utf8'))
    # create docker volume mount command script
    volcreate = pathlib.Path(
        _ROOT_PATH, 'resources/azurefile-dockervolume-create.sh')
    with volcreate.open('wb') as f:
        f.write(b'#!/usr/bin/env bash\n\n')
        for svkey in config[
                'global_resources']['docker_volumes']['shared_data_volumes']:
            conf = config[
                'global_resources']['docker_volumes'][
                    'shared_data_volumes'][svkey]
            if conf['volume_driver'] == 'glusterfs':
                continue
            opts = [
                '-o share={}'.format(conf['azure_file_share_name'])
            ]
            for opt in conf['mount_options']:
                opts.append('-o {}'.format(opt))
            f.write('docker volume create -d azurefile --name {} {}\n'.format(
                svkey, ' '.join(opts)).encode('utf8'))
    return bin, srv, srvenv, volcreate


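# For reference, the env file written above contains lines of the form
# (values are placeholders):
#   AZURE_STORAGE_ACCOUNT=<storage account name>
#   AZURE_STORAGE_ACCOUNT_KEY=<storage account key>
#   AZURE_STORAGE_BASE=core.windows.net
# and the volume creation script emits one
# "docker volume create -d azurefile --name <volume> -o share=<share> ..."
# line per non-glusterfs shared data volume.

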
def add_pool(batch_client, blob_client, config):
    # type: (batch.BatchServiceClient, azureblob.BlockBlobService,
    #        dict) -> None
    """Add a Batch pool to account
    :param azure.batch.batch_service_client.BatchServiceClient batch_client:
        batch client
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param dict config: configuration dict
    """
    # add encryption cert to account if specified
    encrypt = False
    encrypt_sha1tp = None
    try:
        encrypt = config['batch_shipyard']['encryption']['enabled']
        if encrypt:
            convoy.batch.add_certificate_to_account(batch_client, config)
            try:
                encrypt_sha1tp = config['batch_shipyard']['encryption'][
                    'pfx']['sha1_thumbprint']
            except KeyError:
                pfxfile = config['batch_shipyard']['encryption']['pfx'][
                    'filename']
                try:
                    passphrase = config['batch_shipyard']['encryption'][
                        'pfx']['passphrase']
                except KeyError:
                    passphrase = None
                encrypt_sha1tp = convoy.crypto.get_sha1_thumbprint_pfx(
                    pfxfile, passphrase)
                config['batch_shipyard']['encryption']['pfx'][
                    'sha1_thumbprint'] = encrypt_sha1tp
    except KeyError:
        pass
    publisher = config['pool_specification']['publisher']
    offer = config['pool_specification']['offer']
    sku = config['pool_specification']['sku']
    vm_count = config['pool_specification']['vm_count']
    vm_size = config['pool_specification']['vm_size']
    try:
        ingress_files = config[
            'pool_specification']['transfer_files_on_pool_creation']
    except KeyError:
        ingress_files = False
    # ingress data to Azure Blob Storage if specified
    storage_threads = []
    if ingress_files:
        storage_threads = convoy.data.ingress_data(
            batch_client, config, rls=None, kind='storage')
    try:
        maxtasks = config['pool_specification']['max_tasks_per_node']
    except KeyError:
        maxtasks = 1
    try:
        internodecomm = config[
            'pool_specification']['inter_node_communication_enabled']
    except KeyError:
        internodecomm = False
    # cascade settings
    try:
        perf = config['batch_shipyard']['store_timing_metrics']
    except KeyError:
        perf = False
    # peer-to-peer settings
    try:
        p2p = config['data_replication']['peer_to_peer']['enabled']
    except KeyError:
        p2p = False
    if p2p:
        nonp2pcd = False
        try:
            p2psbias = config['data_replication'][
                'peer_to_peer']['direct_download_seed_bias']
            if p2psbias is None or p2psbias < 1:
                raise KeyError()
        except KeyError:
            p2psbias = vm_count // 10
            if p2psbias < 1:
                p2psbias = 1
        try:
            p2pcomp = config[
                'data_replication']['peer_to_peer']['compression']
        except KeyError:
            p2pcomp = True
    else:
        p2psbias = 0
        p2pcomp = False
        try:
            nonp2pcd = config[
                'data_replication']['non_peer_to_peer_concurrent_downloading']
        except KeyError:
            nonp2pcd = True
    # private registry settings
    try:
        pcont = config['docker_registry']['private']['container']
        pregpubpull = config['docker_registry']['private'][
            'allow_public_docker_hub_pull_on_missing']
        preg = config['docker_registry']['private']['enabled']
    except KeyError:
        preg = False
        pregpubpull = False
    # create private registry flags
    regfile = convoy.storage.get_registry_file()
    if preg:
        preg = ' -r {}:{}:{}'.format(pcont, regfile[0], regfile[2])
    else:
        preg = ''
    # create torrent flags
    torrentflags = ' -t {}:{}:{}:{}:{}'.format(
        p2p, nonp2pcd, p2psbias, p2pcomp, pregpubpull)
    # docker settings
    try:
        dockeruser = config['docker_registry']['login']['username']
        dockerpw = config['docker_registry']['login']['password']
    except KeyError:
        dockeruser = None
        dockerpw = None
    try:
        use_shipyard_docker_image = config[
            'batch_shipyard']['use_shipyard_docker_image']
    except KeyError:
        use_shipyard_docker_image = True
    try:
        block_for_gr = config[
            'pool_specification']['block_until_all_global_resources_loaded']
    except KeyError:
        block_for_gr = True
    if block_for_gr:
        block_for_gr = ','.join(
            [r for r in config['global_resources']['docker_images']])
    try:
        hpnssh = config['pool_specification']['ssh']['hpn_server_swap']
    except KeyError:
        hpnssh = False
    # check shared data volume mounts
    azurefile_vd = False
    gluster = False
    try:
        shared_data_volumes = config[
            'global_resources']['docker_volumes']['shared_data_volumes']
        for key in shared_data_volumes:
            if shared_data_volumes[key]['volume_driver'] == 'azurefile':
                azurefile_vd = True
            elif shared_data_volumes[key]['volume_driver'] == 'glusterfs':
                gluster = True
    except KeyError:
        pass
    # prefix settings
    try:
        prefix = config['batch_shipyard']['storage_entity_prefix']
        if len(prefix) == 0:
            prefix = None
    except KeyError:
        prefix = None
    # create resource files list
    _rflist = [_NODEPREP_FILE, _JOBPREP_FILE, _BLOBXFER_FILE, regfile]
    if not use_shipyard_docker_image:
        _rflist.append(_CASCADE_FILE)
        _rflist.append(_SETUP_PR_FILE)
    if perf:
        _rflist.append(_PERF_FILE)
    if hpnssh:
        _rflist.append(_HPNSSH_FILE)
    # handle azurefile docker volume driver
    if azurefile_vd:
        afbin, afsrv, afenv, afvc = setup_azurefile_volume_driver(
            blob_client, config)
        _rflist.append((str(afbin.name), str(afbin)))
        _rflist.append((str(afsrv.name), str(afsrv)))
        _rflist.append((str(afenv.name), str(afenv)))
        _rflist.append((str(afvc.name), str(afvc)))
    # gpu settings
    if (vm_size.lower().startswith('standard_nc') or
            vm_size.lower().startswith('standard_nv')):
        gpupkg = setup_nvidia_docker_package(blob_client, config)
        _rflist.append((str(gpupkg.name), str(gpupkg)))
        gpu_env = '{}:{}:{}'.format(
            vm_size.lower().startswith('standard_nv'),
            _NVIDIA_DRIVER,
            gpupkg.name)
    else:
        gpu_env = None
    # pick latest sku
    node_agent_skus = batch_client.account.list_node_agent_skus()
    skus_to_use = [
        (nas, image_ref) for nas in node_agent_skus for image_ref in sorted(
            nas.verified_image_references, key=lambda item: item.sku)
        if image_ref.publisher.lower() == publisher.lower() and
        image_ref.offer.lower() == offer.lower() and
        image_ref.sku.lower() == sku.lower()
    ]
    sku_to_use, image_ref_to_use = skus_to_use[-1]
    # upload resource files
    sas_urls = convoy.storage.upload_resource_files(
        blob_client, config, _rflist)
    del _rflist
    # create start task commandline
    start_task = [
        '{} -o {} -s {}{}{}{}{}{}{}{}{}{}{}{}'.format(
            _NODEPREP_FILE[0],
            offer,
            sku,
            preg,
            torrentflags,
            ' -a' if azurefile_vd else '',
            ' -b {}'.format(block_for_gr) if block_for_gr else '',
            ' -d' if use_shipyard_docker_image else '',
            ' -e {}'.format(encrypt_sha1tp) if encrypt else '',
            ' -f' if gluster else '',
            ' -g {}'.format(gpu_env) if gpu_env is not None else '',
            ' -n' if vm_size.lower() not in _VM_TCP_NO_TUNE else '',
            ' -p {}'.format(prefix) if prefix else '',
            ' -w' if hpnssh else '',
        ),
    ]
    # add additional start task commands
    try:
        start_task.extend(
            config['pool_specification']['additional_node_prep_commands'])
    except KeyError:
        pass
    # digest any input_data
    addlcmds = convoy.data.process_input_data(
        config, _BLOBXFER_FILE, config['pool_specification'])
    if addlcmds is not None:
        start_task.append(addlcmds)
    del addlcmds
    # create pool param
    pool = batchmodels.PoolAddParameter(
        id=config['pool_specification']['id'],
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id=sku_to_use.id),
        vm_size=vm_size,
        target_dedicated=vm_count,
        max_tasks_per_node=maxtasks,
        enable_inter_node_communication=internodecomm,
        start_task=batchmodels.StartTask(
            command_line=convoy.util.wrap_commands_in_shell(
                start_task, wait=False),
            run_elevated=True,
            wait_for_success=True,
            environment_settings=[
                batchmodels.EnvironmentSetting('LC_ALL', 'en_US.UTF-8'),
                batchmodels.EnvironmentSetting(
                    'SHIPYARD_STORAGE_ENV',
                    convoy.crypto.encrypt_string(
                        encrypt, '{}:{}:{}'.format(
                            convoy.storage.get_storageaccount(),
                            convoy.storage.get_storageaccount_endpoint(),
                            convoy.storage.get_storageaccount_key()),
                        config)
                )
            ],
            resource_files=[],
        ),
    )
    if encrypt:
        pool.certificate_references = [
            batchmodels.CertificateReference(
                encrypt_sha1tp, 'sha1',
                visibility=[batchmodels.CertificateVisibility.starttask]
            )
        ]
    for rf in sas_urls:
        pool.start_task.resource_files.append(
            batchmodels.ResourceFile(
                file_path=rf,
                blob_source=sas_urls[rf])
        )
    if gpu_env:
        pool.start_task.resource_files.append(
            batchmodels.ResourceFile(
                file_path=_NVIDIA_DRIVER,
                blob_source=config[
                    'pool_specification']['gpu']['nvidia_driver']['source'],
                file_mode='0755')
        )
    if preg:
        ssel = config['docker_registry']['private']['storage_account_settings']
        pool.start_task.environment_settings.append(
            batchmodels.EnvironmentSetting(
                'SHIPYARD_PRIVATE_REGISTRY_STORAGE_ENV',
                convoy.crypto.encrypt_string(
                    encrypt, '{}:{}:{}'.format(
                        config['credentials']['storage'][ssel]['account'],
                        config['credentials']['storage'][ssel]['endpoint'],
                        config['credentials']['storage'][ssel]['account_key']),
                    config)
            )
        )
        del ssel
    if (dockeruser is not None and len(dockeruser) > 0 and
            dockerpw is not None and len(dockerpw) > 0):
        pool.start_task.environment_settings.append(
            batchmodels.EnvironmentSetting('DOCKER_LOGIN_USERNAME', dockeruser)
        )
        pool.start_task.environment_settings.append(
            batchmodels.EnvironmentSetting(
                'DOCKER_LOGIN_PASSWORD',
                convoy.crypto.encrypt_string(encrypt, dockerpw, config))
        )
    if perf:
        pool.start_task.environment_settings.append(
            batchmodels.EnvironmentSetting('SHIPYARD_TIMING', '1')
        )
    # create pool
    nodes = convoy.batch.create_pool(batch_client, config, pool)
    # set up gluster if specified
    if gluster:
        _setup_glusterfs(
            batch_client, blob_client, config, nodes, _GLUSTERPREP_FILE,
            cmdline=None)
    # create admin user on each node if requested
    convoy.batch.add_ssh_user(batch_client, config, nodes)
    # log remote login settings
    rls = convoy.batch.get_remote_login_settings(batch_client, config, nodes)
    # ingress data to shared fs if specified
    if ingress_files:
        convoy.data.ingress_data(batch_client, config, rls=rls, kind='shared')
        # wait for storage ingress processes
        convoy.data.wait_for_storage_threads(storage_threads)


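# Example (hypothetical values) of a node prep start task command line as
# rendered by add_pool above, for an Ubuntu 16.04 pool with no private
# registry, no azurefile/glusterfs volumes and no GPU:
#   shipyard_nodeprep.sh -o UbuntuServer -s 16.04.0-LTS \
#       -t False:True:0:False:False -b image1,image2 -d -n -p shipyard
# The exact flags emitted depend on the pool, docker_registry and
# data_replication configuration.

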
def _setup_glusterfs(
        batch_client, blob_client, config, nodes, shell_script, cmdline=None):
    # type: (batch.BatchServiceClient, azureblob.BlockBlobService, dict,
    #        List[batchmodels.ComputeNode], str, str) -> None
    """Setup glusterfs via multi-instance task
    :param batch_client: The batch client to use.
    :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param dict config: configuration dict
    :param list nodes: list of nodes
    :param str shell_script: glusterfs setup script to use
    :param str cmdline: coordination cmdline
    """
    # get volume type/options
    voltype = 'replica'
    volopts = None
    shared_data_volumes = config[
        'global_resources']['docker_volumes']['shared_data_volumes']
    for key in shared_data_volumes:
        try:
            if shared_data_volumes[key]['volume_driver'] == 'glusterfs':
                voltype = shared_data_volumes[key]['volume_type']
                volopts = shared_data_volumes[key]['volume_options']
        except KeyError:
            pass
    if volopts is not None and len(volopts) == 0:
        volopts = None
    pool_id = config['pool_specification']['id']
    job_id = 'shipyard-glusterfs-{}'.format(uuid.uuid4())
    job = batchmodels.JobAddParameter(
        id=job_id,
        pool_info=batchmodels.PoolInformation(pool_id=pool_id),
    )
    batch_client.job.add(job)
    # create coordination command line
    if cmdline is None:
        if config['pool_specification']['offer'].lower() == 'ubuntuserver':
            tempdisk = '/mnt'
        else:
            tempdisk = '/mnt/resource'
        cmdline = convoy.util.wrap_commands_in_shell([
            '$AZ_BATCH_TASK_DIR/{} {} {}'.format(
                shell_script[0], voltype.lower(), tempdisk)])
    # create application command line
    appcmd = [
        '[[ -f $AZ_BATCH_TASK_DIR/.glusterfs_success ]] || exit 1',
    ]
    if volopts is not None:
        for vo in volopts:
            appcmd.append('gluster volume set gv0 {}'.format(vo))
    # upload script
    sas_urls = convoy.storage.upload_resource_files(
        blob_client, config, [shell_script])
    batchtask = batchmodels.TaskAddParameter(
        id='gluster-setup',
        multi_instance_settings=batchmodels.MultiInstanceSettings(
            number_of_instances=config['pool_specification']['vm_count'],
            coordination_command_line=cmdline,
            common_resource_files=[
                batchmodels.ResourceFile(
                    file_path=shell_script[0],
                    blob_source=sas_urls[shell_script[0]],
                    file_mode='0755'),
            ],
        ),
        command_line=convoy.util.wrap_commands_in_shell(appcmd),
        run_elevated=True,
    )
    batch_client.task.add(job_id=job_id, task=batchtask)
    logger.debug(
        'waiting for glusterfs setup task {} in job {} to complete'.format(
            batchtask.id, job_id))
    # wait for gluster fs setup task to complete
    while True:
        batchtask = batch_client.task.get(job_id, batchtask.id)
        if batchtask.state == batchmodels.TaskState.completed:
            break
        time.sleep(1)
    # ensure all nodes have glusterfs success file
    if nodes is None:
        nodes = batch_client.compute_node.list(pool_id)
    success = True
    for node in nodes:
        try:
            batch_client.file.get_node_file_properties_from_compute_node(
                pool_id, node.id,
                ('workitems/{}/job-1/gluster-setup/wd/'
                 '.glusterfs_success').format(job_id))
        except batchmodels.BatchErrorException:
            logger.error('gluster success file absent on node {}'.format(
                node.id))
            success = False
            break
    # delete job
    batch_client.job.delete(job_id)
    if not success:
        raise RuntimeError('glusterfs setup failed')
    logger.info(
        'glusterfs setup task {} in job {} completed'.format(
            batchtask.id, job_id))


def _resize_pool(batch_client, blob_client, config):
    # type: (batch.BatchServiceClient, azureblob.BlockBlobService,
    #        dict) -> None
    """Resize pool that may contain glusterfs
    :param batch_client: The batch client to use.
    :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param dict config: configuration dict
    """
    pool_id = config['pool_specification']['id']
    # check if this is a glusterfs-enabled pool
    gluster_present = False
    voltype = 'replica'
    old_nodes = {}
    try:
        for svkey in config[
                'global_resources']['docker_volumes']['shared_data_volumes']:
            conf = config['global_resources']['docker_volumes'][
                'shared_data_volumes'][svkey]
            if conf['volume_driver'] == 'glusterfs':
                gluster_present = True
                try:
                    voltype = conf['volume_type']
                except KeyError:
                    pass
                break
    except KeyError:
        gluster_present = False
    logger.debug('glusterfs shared volume present: {}'.format(
        gluster_present))
    if gluster_present:
        for node in batch_client.compute_node.list(pool_id):
            old_nodes[node.id] = node.ip_address
    # resize pool
    convoy.batch.resize_pool(batch_client, config)
    # add brick for new nodes
    if gluster_present:
        # wait for nodes to reach idle
        nodes = convoy.batch.wait_for_pool_ready(
            batch_client, config, pool_id)
        # get internal ip addresses of new nodes
        new_nodes = [
            node.ip_address for node in nodes if node.id not in old_nodes
        ]
        masterip = next(iter(old_nodes.values()))
        # get tempdisk mountpoint
        if config['pool_specification']['offer'].lower() == 'ubuntuserver':
            tempdisk = '/mnt'
        else:
            tempdisk = '/mnt/resource'
        # construct cmdline
        vm_count = config['pool_specification']['vm_count']
        cmdline = convoy.util.wrap_commands_in_shell([
            '$AZ_BATCH_TASK_DIR/{} {} {} {} {} {}'.format(
                _GLUSTERRESIZE_FILE[0], voltype.lower(), tempdisk, vm_count,
                masterip, ' '.join(new_nodes))])
        # setup gluster
        _setup_glusterfs(
            batch_client, blob_client, config, nodes, _GLUSTERRESIZE_FILE,
            cmdline=cmdline)


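# For reference, the glusterfs resize coordination command built above expands
# to something like (illustrative values):
#   $AZ_BATCH_TASK_DIR/shipyard_glusterfs_resize.sh replica /mnt 8 \
#       10.0.0.4 10.0.0.5
# i.e. script, volume type, temp disk mountpoint, new vm_count, master node ip
# and the internal ips of the newly added nodes.

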
def _adjust_settings_for_pool_creation(config):
    # type: (dict) -> None
    """Adjust settings for pool creation
    :param dict config: configuration dict
    """
    publisher = config['pool_specification']['publisher'].lower()
    offer = config['pool_specification']['offer'].lower()
    sku = config['pool_specification']['sku'].lower()
    vm_size = config['pool_specification']['vm_size']
    # enforce publisher/offer/sku restrictions
    allowed = False
    shipyard_container_required = True
    if publisher == 'canonical':
        if offer == 'ubuntuserver':
            if sku >= '14.04.0-lts':
                allowed = True
                if sku >= '16.04.0-lts':
                    shipyard_container_required = False
    elif publisher == 'credativ':
        if offer == 'debian':
            if sku >= '8':
                allowed = True
    elif publisher == 'openlogic':
        if offer.startswith('centos'):
            if sku >= '7':
                allowed = True
    elif publisher == 'redhat':
        if offer == 'rhel':
            if sku >= '7':
                allowed = True
    elif publisher == 'suse':
        if offer.startswith('sles'):
            if sku >= '12-sp1':
                allowed = True
        elif offer == 'opensuse-leap':
            if sku >= '42':
                allowed = True
        elif offer == 'opensuse':
            if sku == '13.2':
                allowed = True
    # check for valid image if gpu, currently only ubuntu 16.04 is supported
    if ((vm_size.lower().startswith('standard_nc') or
            vm_size.lower().startswith('standard_nv')) and
            (publisher != 'canonical' or offer != 'ubuntuserver' or
             sku < '16.04.0-lts')):
        allowed = False
    # oracle linux is not supported due to UEKR4 requirement
    if not allowed:
        raise ValueError(
            ('Unsupported Docker Host VM Config, publisher={} offer={} '
             'sku={} vm_size={}').format(publisher, offer, sku, vm_size))
    # adjust for shipyard container requirement
    if shipyard_container_required:
        config['batch_shipyard']['use_shipyard_docker_image'] = True
        logger.warning(
            ('forcing shipyard docker image to be used due to '
             'VM config, publisher={} offer={} sku={}').format(
                publisher, offer, sku))
    # adjust inter node comm setting
    vm_count = int(config['pool_specification']['vm_count'])
    try:
        p2p = config['data_replication']['peer_to_peer']['enabled']
    except KeyError:
        p2p = False
    try:
        internode = config[
            'pool_specification']['inter_node_communication_enabled']
    except KeyError:
        internode = False
    max_vms = 20 if publisher == 'microsoftwindowsserver' else 40
    if vm_count > max_vms:
        if p2p:
            logger.warning(
                ('disabling peer-to-peer transfer as pool size of {} exceeds '
                 'max limit of {} vms for inter-node communication').format(
                    vm_count, max_vms))
            if 'data_replication' not in config:
                config['data_replication'] = {}
            if 'peer_to_peer' not in config['data_replication']:
                config['data_replication']['peer_to_peer'] = {}
            config['data_replication']['peer_to_peer']['enabled'] = False
            p2p = False
        if internode:
            internode = False
            logger.warning(
                ('disabling inter-node communication as pool size of {} '
                 'exceeds max limit of {} vms for setting').format(
                    vm_count, max_vms))
            config['pool_specification'][
                'inter_node_communication_enabled'] = internode
    # ensure settings p2p/internode settings are compatible
    if p2p and not internode:
        internode = True
        config['pool_specification'][
            'inter_node_communication_enabled'] = internode
        logger.warning(
            'force enabling inter-node communication due to peer-to-peer '
            'transfer')
    # hpn-ssh can only be used for Ubuntu currently
    try:
        if (config['pool_specification']['ssh']['hpn_server_swap'] and
                publisher != 'canonical' and offer != 'ubuntuserver'):
            logger.warning('cannot enable HPN SSH swap on {} {} {}'.format(
                publisher, offer, sku))
            config['pool_specification']['ssh']['hpn_server_swap'] = False
    except KeyError:
        pass
    # adjust ssh settings on windows
    if convoy.util.on_windows():
        try:
            ssh_pub_key = config['pool_specification']['ssh']['ssh_public_key']
        except KeyError:
            ssh_pub_key = None
        if ssh_pub_key is None:
            logger.warning(
                'disabling ssh user creation due to script being run '
                'from Windows and no public key is specified')
            config['pool_specification'].pop('ssh', None)
    # glusterfs requires internode comms
    try:
        num_gluster = 0
        shared = config['global_resources']['docker_volumes'][
            'shared_data_volumes']
        for sdvkey in shared:
            if shared[sdvkey]['volume_driver'] == 'glusterfs':
                if not internode:
                    # do not modify value and proceed since this interplays
                    # with p2p settings, simply raise exception and force
                    # user to reconfigure
                    raise ValueError(
                        'inter node communication in pool configuration '
                        'must be enabled for glusterfs')
                num_gluster += 1
                try:
                    if shared[sdvkey]['volume_type'] != 'replica':
                        raise ValueError(
                            'only replicated GlusterFS volumes are '
                            'currently supported')
                except KeyError:
                    pass
        if num_gluster > 1:
            raise ValueError(
                'cannot create more than one GlusterFS volume per pool')
    except KeyError:
        pass
    # ensure file transfer settings
    try:
        xfer_files_with_pool = config['pool_specification'][
            'transfer_files_on_pool_creation']
    except KeyError:
        xfer_files_with_pool = False
        config['pool_specification'][
            'transfer_files_on_pool_creation'] = xfer_files_with_pool
    try:
        files = config['global_resources']['files']
        shared = False
        for fdict in files:
            if 'shared_data_volume' in fdict['destination']:
                shared = True
                break
        if convoy.util.on_windows() and shared and xfer_files_with_pool:
            raise RuntimeError(
                'cannot transfer files to shared data volume on Windows')
    except KeyError:
        pass
    # force disable block for global resources if ingressing data
    try:
        block_for_gr = config[
            'pool_specification']['block_until_all_global_resources_loaded']
    except KeyError:
        block_for_gr = True
    if xfer_files_with_pool and block_for_gr:
        logger.warning(
            'disabling block until all global resources loaded with '
            'transfer files on pool creation enabled')
        config['pool_specification'][
            'block_until_all_global_resources_loaded'] = False


def _adjust_general_settings(config):
    # type: (dict) -> None
    """Adjust general settings
    :param dict config: configuration dict
    """
    # adjust encryption settings on windows
    if convoy.util.on_windows():
        try:
            enc = config['batch_shipyard']['encryption']['enabled']
        except KeyError:
            enc = False
        if enc:
            logger.warning(
                'disabling credential encryption due to script being run '
                'from Windows')
            config['batch_shipyard']['encryption']['enabled'] = False


def main():
    """Main function"""
    # get command-line args
    args = parseargs()
    args.action = args.action.lower()

    if args.configdir is not None:
        if args.credentials is None:
            args.credentials = str(pathlib.Path(
                args.configdir, 'credentials.json'))
        if args.config is None:
            args.config = str(pathlib.Path(args.configdir, 'config.json'))
        if args.pool is None:
            args.pool = str(pathlib.Path(args.configdir, 'pool.json'))

    if args.credentials is None:
        raise ValueError('credentials json not specified')
    if args.config is None:
        raise ValueError('config json not specified')

    with open(args.credentials, 'r') as f:
        config = json.load(f)
    with open(args.config, 'r') as f:
        config = convoy.util.merge_dict(config, json.load(f))
    try:
        with open(args.pool, 'r') as f:
            config = convoy.util.merge_dict(config, json.load(f))
    except ValueError:
        raise
    except Exception:
        config['pool_specification'] = {
            'id': args.poolid
        }
    if args.action in (
            'addjobs', 'cleanmijobs', 'delcleanmijobs', 'deljobs',
            'deljobswait', 'termjobs', 'listtasks', 'listtaskfiles'):
        if args.configdir is not None and args.jobs is None:
            args.jobs = str(pathlib.Path(args.configdir, 'jobs.json'))
        try:
            with open(args.jobs, 'r') as f:
                config = convoy.util.merge_dict(config, json.load(f))
        except ValueError:
            raise
        except Exception:
            config['job_specifications'] = [{
                'id': args.jobid
            }]
    if args.verbose:
        logger.debug('config:\n' + json.dumps(config, indent=4))
    config['_verbose'] = args.verbose
    _populate_global_settings(config, args.action)
    config['_auto_confirm'] = args.yes

    batch_client, blob_client, queue_client, table_client = \
        _create_credentials(config)

    _adjust_general_settings(config)

    if args.action == 'addpool':
        # first check if pool exists to prevent accidental metadata clear
        if batch_client.pool.exists(config['pool_specification']['id']):
            raise RuntimeError(
                'attempting to create a pool that already exists: {}'.format(
                    config['pool_specification']['id']))
        convoy.storage.create_storage_containers(
            blob_client, queue_client, table_client, config)
        convoy.storage.clear_storage_containers(
            blob_client, queue_client, table_client, config)
        _adjust_settings_for_pool_creation(config)
        convoy.storage.populate_queues(queue_client, table_client, config)
        add_pool(batch_client, blob_client, config)
    elif args.action == 'resizepool':
        _resize_pool(batch_client, blob_client, config)
    elif args.action == 'delpool':
        try:
            convoy.batch.del_pool(batch_client, config)
        except batchmodels.BatchErrorException as ex:
            logger.exception(ex)
        convoy.storage.cleanup_with_del_pool(
            blob_client, queue_client, table_client, config)
    elif args.action == 'addsshuser':
        convoy.batch.add_ssh_user(batch_client, config)
        convoy.batch.get_remote_login_settings(batch_client, config)
    elif args.action == 'delnode':
        convoy.batch.del_node(batch_client, config, args.nodeid)
    elif args.action == 'addjobs':
        convoy.batch.add_jobs(
            batch_client, blob_client, config, _JOBPREP_FILE, _BLOBXFER_FILE)
    elif args.action == 'cleanmijobs':
        convoy.batch.clean_mi_jobs(batch_client, config)
    elif args.action == 'termjobs':
        convoy.batch.terminate_jobs(batch_client, config)
    elif args.action == 'deljobs':
        convoy.batch.del_jobs(batch_client, config)
    elif args.action == 'deljobswait':
        convoy.batch.del_jobs(batch_client, config, wait=True)
    elif args.action == 'delcleanmijobs':
        convoy.batch.del_clean_mi_jobs(batch_client, config)
    elif args.action == 'delalljobs':
        convoy.batch.del_all_jobs(batch_client, config)
    elif args.action == 'grls':
        convoy.batch.get_remote_login_settings(batch_client, config)
    elif args.action == 'streamfile':
        convoy.batch.stream_file_and_wait_for_task(batch_client, args.filespec)
    elif args.action == 'gettaskfile':
        convoy.batch.get_file_via_task(batch_client, config, args.filespec)
    elif args.action == 'gettaskallfiles':
        convoy.batch.get_all_files_via_task(
            batch_client, config, args.filespec)
    elif args.action == 'getnodefile':
        convoy.batch.get_file_via_node(batch_client, config, args.nodeid)
    elif args.action == 'ingressdata':
        try:
            # ensure there are remote login settings
            rls = convoy.batch.get_remote_login_settings(
                batch_client, config, nodes=None)
            # ensure nodes are at least idle/running for shared ingress
            kind = 'all'
            if not convoy.batch.check_pool_nodes_runnable(
                    batch_client, config):
                kind = 'storage'
        except batchmodels.BatchErrorException as ex:
            if 'The specified pool does not exist' in ex.message.value:
                rls = None
                kind = 'storage'
            else:
                raise
        storage_threads = convoy.data.ingress_data(
            batch_client, config, rls=rls, kind=kind)
        convoy.data.wait_for_storage_threads(storage_threads)
    elif args.action == 'listjobs':
        convoy.batch.list_jobs(batch_client, config)
    elif args.action == 'listtasks':
        convoy.batch.list_tasks(batch_client, config)
    elif args.action == 'listtaskfiles':
        convoy.batch.list_task_files(batch_client, config)
    elif args.action == 'createcert':
        sha1tp = convoy.crypto.generate_pem_pfx_certificates(config)
        logger.info('SHA1 Thumbprint: {}'.format(sha1tp))
    elif args.action == 'addcert':
        convoy.batch.add_certificate_to_account(batch_client, config, False)
    elif args.action == 'delcert':
        convoy.batch.del_certificate_from_account(batch_client, config)
    elif args.action == 'delstorage':
        convoy.storage.delete_storage_containers(
            blob_client, queue_client, table_client, config)
    elif args.action == 'clearstorage':
        convoy.storage.clear_storage_containers(
            blob_client, queue_client, table_client, config)
    else:
        raise ValueError('Unknown action: {}'.format(args.action))


def parseargs():
    """Parse program arguments
    :rtype: argparse.Namespace
    :return: parsed arguments
    """
    parser = argparse.ArgumentParser(
        description='Batch Shipyard: Provision and Execute Docker Workloads '
        'on Azure Batch')
    parser.set_defaults(verbose=False, yes=False)
    parser.add_argument(
        'action', help='addpool, addjobs, addsshuser, cleanmijobs, '
        'termjobs, deljobs, deljobswait, delcleanmijobs, delalljobs, '
        'delpool, delnode, grls, streamfile, gettaskfile, gettaskallfiles, '
        'getnodefile, ingressdata, listjobs, listtasks, listtaskfiles, '
        'createcert, addcert, delcert, clearstorage, delstorage')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='verbose output')
    parser.add_argument(
        '-y', '--yes', dest='yes', action='store_true',
        help='assume yes for all yes/no confirmations')
    parser.add_argument(
        '--credentials',
        help='credentials json config. required for all actions')
    parser.add_argument(
        '--config',
        help='global json config for option. required for all actions')
    parser.add_argument(
        '--configdir',
        help='configdir where all config files can be found. json config '
        'file must be named exactly the same as the switch option, e.g., '
        'pool.json for --pool. individually specified configuration options '
        'take precedence over this option.')
    parser.add_argument(
        '--pool',
        help='pool json config. required for most actions')
    parser.add_argument(
        '--jobs',
        help='jobs json config. required for job-related actions')
    parser.add_argument(
        '--nodeid',
        help='node id for delnode or getnodefile action')
    parser.add_argument(
        '--filespec',
        help='parameter for action streamfile/gettaskfile: '
        'jobid:taskid:filename')
    parser.add_argument('--version', action='version', version=_VERSION)
    return parser.parse_args()


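# Example invocations (file paths are placeholders):
#   python shipyard.py addpool --credentials credentials.json \
#       --config config.json --pool pool.json
#   python shipyard.py addjobs --configdir config/ -y
#   python shipyard.py delpool --configdir config/

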
if __name__ == '__main__':
    convoy.util.setup_logger(logger)
    main()