- Add poolid param for pool del
- Fix vm_count deprecation check on fs actions
- Improve robustness of package index updates
- Prompt for jobs cmi action
- Update to latest dependencies
Fred Park 2017-05-24 09:54:09 -07:00
Parent 06fd4f8e62
Commit 3a5fb452d5
9 changed files with 96 additions and 43 deletions

View file

@@ -2,6 +2,17 @@
 ## [Unreleased]
+### Added
+- `--poolid` parameter for `pool del` to specify a specific pool to delete
+
+### Changed
+- Prompt for confirmation for `jobs cmi`
+- Updated to latest dependencies
+
+### Fixed
+- Remote FS allocation issue with `vm_count` deprecation check
+- Better handling of package index refresh errors
+
 ## [2.7.0rc1] - 2017-05-24
 ### Added
 - `pool listimages` command which will list all common Docker images on

View file

@@ -680,16 +680,19 @@ def resize_pool(batch_client, config, wait=False):
         addl_end_states=[batchmodels.ComputeNodeState.running])


-def del_pool(batch_client, config):
-    # type: (azure.batch.batch_service_client.BatchServiceClient, dict) -> bool
+def del_pool(batch_client, config, pool_id=None):
+    # type: (azure.batch.batch_service_client.BatchServiceClient, dict,
+    #        str) -> bool
     """Delete a pool
     :param batch_client: The batch client to use.
     :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
     :param dict config: configuration dict
+    :param str pool_id: pool id
     :rtype: bool
     :return: if pool was deleted
     """
-    pool_id = settings.pool_id(config)
+    if util.is_none_or_empty(pool_id):
+        pool_id = settings.pool_id(config)
     if not util.confirm_action(
             config, 'delete {} pool'.format(pool_id)):
         return False
@@ -957,6 +960,9 @@ def clean_mi_jobs(batch_client, config):
     """
     for job in settings.job_specifications(config):
         job_id = settings.job_id(job)
+        if not util.confirm_action(
+                config, 'cleanup {} job'.format(job_id)):
+            continue
         cleanup_job_id = 'shipyardcleanup-' + job_id
         cleanup_job = batchmodels.JobAddParameter(
             id=cleanup_job_id,
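Editor's note: the `del_pool` change above follows a simple pattern worth seeing in isolation: an explicit argument wins, otherwise the value is resolved from configuration. Below is a minimal, self-contained sketch of that pattern; the helpers only mirror `util.is_none_or_empty` and `settings.pool_id` (assuming the pool id lives at `pool_specification:id`, consistent with the config layout referenced elsewhere in this commit) and are not the Batch Shipyard implementations.

    def is_none_or_empty(value):
        # mirrors util.is_none_or_empty: treat None and '' as "not given"
        return value is None or len(value) == 0

    def resolve_pool_id(config, pool_id=None):
        # explicit pool_id wins; otherwise fall back to the pool specification
        if is_none_or_empty(pool_id):
            pool_id = config['pool_specification']['id']
        return pool_id

    config = {'pool_specification': {'id': 'mypool'}}
    print(resolve_pool_id(config))            # mypool (config fallback)
    print(resolve_pool_id(config, 'other'))   # other (explicit override)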

View file

@@ -202,13 +202,17 @@ def check_for_invalid_config(config):
             'migrate your Docker images to Azure Container Registry, '
             'Docker Hub (public or private), or any other Internet '
             'accessible Docker registry solution.')
-    if isinstance(config['pool_specification']['vm_count'], int):
-        logger.warning(
-            'DEPRECATION WARNING: pool_specification:vm_count is directly '
-            'set with an integral value for dedicated nodes. This '
-            'configuration will not be supported in future releases. '
-            'Please update your configuration to include a complex property '
-            'of dedicated and/or low_priority nodes for vm_count.')
+    try:
+        if isinstance(config['pool_specification']['vm_count'], int):
+            logger.warning(
+                'DEPRECATION WARNING: pool_specification:vm_count is '
+                'directly set with an integral value for dedicated nodes. '
+                'This configuration will not be supported in future '
+                'releases. Please update your configuration to include a '
+                'complex property of dedicated and/or low_priority nodes '
+                'for vm_count.')
+    except KeyError:
+        pass


 def populate_global_settings(config, fs_storage):
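Editor's note: the `try`/`except KeyError` wrapper is the actual fix for the fs-action bug named in the commit message: remote FS invocations load a configuration that has no `pool_specification`, so the old unguarded indexing raised. A runnable sketch of the guard, with illustrative names rather than the real module:

    import logging

    logging.basicConfig()
    logger = logging.getLogger('sketch')

    def warn_if_deprecated_vm_count(config):
        # guard the nested lookup: fs-only configs lack pool_specification
        try:
            if isinstance(config['pool_specification']['vm_count'], int):
                logger.warning('DEPRECATION WARNING: vm_count is an int')
        except KeyError:
            pass

    warn_if_deprecated_vm_count({})  # fs action: no-op instead of KeyError
    warn_if_deprecated_vm_count({'pool_specification': {'vm_count': 4}})  # warns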
@@ -1939,10 +1943,10 @@ def action_pool_list(batch_client):
 def action_pool_delete(
         batch_client, blob_client, queue_client, table_client, config,
-        wait=False):
+        pool_id=None, wait=False):
     # type: (batchsc.BatchServiceClient, azureblob.BlockBlobService,
     #        azurequeue.QueueService, azuretable.TableService, dict,
-    #        bool) -> None
+    #        str, bool) -> None
     """Action: Pool Delete
     :param azure.batch.batch_service_client.BatchServiceClient batch_client:
         batch client
@@ -1950,20 +1954,22 @@ def action_pool_delete(
     :param azure.storage.queue.QueueService queue_client: queue client
     :param azure.storage.table.TableService table_client: table client
     :param dict config: configuration dict
+    :param str pool_id: pool id to delete
     :param bool wait: wait for pool to delete
     """
     deleted = False
     try:
-        deleted = batch.del_pool(batch_client, config)
+        deleted = batch.del_pool(batch_client, config, pool_id=pool_id)
     except batchmodels.BatchErrorException as ex:
         logger.exception(ex)
         if 'The specified pool does not exist' in ex.message.value:
             deleted = True
     if deleted:
         storage.cleanup_with_del_pool(
-            blob_client, queue_client, table_client, config)
+            blob_client, queue_client, table_client, config, pool_id=pool_id)
     if wait:
-        pool_id = settings.pool_id(config)
+        if util.is_none_or_empty(pool_id):
+            pool_id = settings.pool_id(config)
         logger.debug('waiting for pool {} to delete'.format(pool_id))
         while batch_client.pool.exists(pool_id):
             time.sleep(3)
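Editor's note: the wait path polls pool existence every three seconds until the service reports the pool gone. The same loop, sketched generically; the timeout parameter is an illustrative addition, as the code above polls indefinitely:

    import time

    def wait_until_gone(exists, interval=3, timeout=None):
        # poll a predicate until it returns False, sleeping between checks
        start = time.time()
        while exists():
            if timeout is not None and time.time() - start > timeout:
                raise RuntimeError('resource still exists after timeout')
            time.sleep(interval)

    # usage mirroring the loop above:
    # wait_until_gone(lambda: batch_client.pool.exists(pool_id))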

View file

@@ -767,7 +767,7 @@ def create_storage_cluster(
             config, 'create storage cluster {}'.format(sc_id)):
         return
     # create storage container
-    storage.create_storage_containers_remotefs(blob_client, config)
+    storage.create_storage_containers_remotefs(blob_client)
     # async operation dictionary
     async_ops = {}
     # create nsg
@@ -1848,7 +1848,7 @@ def delete_storage_cluster(
     logger.info('availability set {} deleted'.format(as_name))
     deleted.clear()
     # delete storage container
-    storage.delete_storage_containers_remotefs(blob_client, config)
+    storage.delete_storage_containers_remotefs(blob_client)
     # wait for all async ops to complete
     if wait:
         logger.debug('waiting for network security groups to delete')

View file

@@ -493,38 +493,39 @@ def create_storage_containers(blob_client, queue_client, table_client, config):
             queue_client.create_queue(_STORAGE_CONTAINERS[key])


-def create_storage_containers_remotefs(blob_client, config):
-    # type: (azureblob.BlockBlobService, dict) -> None
+def create_storage_containers_remotefs(blob_client):
+    # type: (azureblob.BlockBlobService) -> None
     """Create storage containers used for remotefs
     :param azure.storage.blob.BlockBlobService blob_client: blob client
-    :param dict config: configuration dict
     """
     contname = _STORAGE_CONTAINERS['blob_remotefs']
     logger.info('creating container: {}'.format(contname))
     blob_client.create_container(contname)


-def delete_storage_containers_remotefs(blob_client, config):
-    # type: (azureblob.BlockBlobService, dict) -> None
+def delete_storage_containers_remotefs(blob_client):
+    # type: (azureblob.BlockBlobService) -> None
     """Delete storage containers used for remotefs
     :param azure.storage.blob.BlockBlobService blob_client: blob client
-    :param dict config: configuration dict
     """
     contname = _STORAGE_CONTAINERS['blob_remotefs']
     logger.info('deleting container: {}'.format(contname))
     blob_client.delete_container(contname)


-def cleanup_with_del_pool(blob_client, queue_client, table_client, config):
+def cleanup_with_del_pool(
+        blob_client, queue_client, table_client, config, pool_id=None):
     # type: (azureblob.BlockBlobService, azurequeue.QueueService,
-    #        azuretable.TableService, dict) -> None
+    #        azuretable.TableService, dict, str) -> None
     """Special cleanup routine in combination with delete pool
     :param azure.storage.blob.BlockBlobService blob_client: blob client
     :param azure.storage.queue.QueueService queue_client: queue client
     :param azure.storage.table.TableService table_client: table client
     :param dict config: configuration dict
+    :param str pool_id: pool id
     """
-    pool_id = settings.pool_id(config)
+    if util.is_none_or_empty(pool_id):
+        pool_id = settings.pool_id(config)
     if not util.confirm_action(
             config, 'delete/cleanup of Batch Shipyard metadata in storage '
             'containers associated with {} pool'.format(pool_id)):

View file

@@ -445,6 +445,8 @@ Batch Shipyard. It is recommended to use this command instead of deleting
 a pool directly from the Azure Portal, Batch Labs, or other tools as
 this action can conveniently remove all associated Batch Shipyard metadata on
 Azure Storage.
+  * `--poolid` will delete the specified pool instead of the pool from the
+    pool configuration file
   * `--wait` will wait for deletion to complete
 * `delnode` will delete the specified node from the pool
 * `dsu` will delete the SSH user defined in the pool configuration file
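Editor's note: assuming the CLI entry point is invoked as `shipyard` and a hypothetical pool named `mypool`, `shipyard pool del --poolid mypool --wait` would delete `mypool` regardless of the pool configuration file and block until the Batch service finishes the deletion; omitting `--poolid` keeps the old behavior of deleting the configured pool.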

View file

@@ -2,14 +2,14 @@ adal==0.4.5
 azure-batch==3.0.0
 azure-keyvault==0.3.0
 azure-mgmt-batch==4.0.0
-azure-mgmt-compute==1.0.0rc1
-azure-mgmt-network==1.0.0rc3
-azure-mgmt-resource==1.1.0rc1
+azure-mgmt-compute==1.0.0
+azure-mgmt-network==1.0.0
+azure-mgmt-resource==1.1.0
 azure-storage==0.34.2
 blobxfer==0.12.1
 click==6.7
 future==0.16.0
-msrest==0.4.7
+msrest==0.4.8
 msrestazure==0.4.7
 pathlib2==2.2.1; python_version < '3.5'
 requests==2.14.2

View file

@@ -10,7 +10,7 @@ block=
 cascadecontainer=0
 encrypted=
 hpnssh=0
-gluster=0
+gluster_on_compute=0
 gpu=
 networkopt=0
 offer=
@@ -32,7 +32,7 @@ while getopts "h?abde:fg:nm:o:p:r:s:t:v:wx:" opt; do
         echo "-b block until resources loaded"
         echo "-d use docker container for cascade"
         echo "-e [thumbprint] encrypted credentials with cert"
-        echo "-f set up glusterfs cluster"
+        echo "-f set up glusterfs on compute"
         echo "-g [nv-series:driver file:nvidia docker pkg] gpu support"
         echo "-m [type:scid] mount storage cluster"
         echo "-n optimize network TCP settings"
@@ -60,7 +60,7 @@ while getopts "h?abde:fg:nm:o:p:r:s:t:v:wx:" opt; do
             encrypted=${OPTARG,,}
             ;;
         f)
-            gluster=1
+            gluster_on_compute=1
             ;;
         g)
             gpu=$OPTARG
@@ -169,6 +169,31 @@ install_azurefile_docker_volume_driver() {
     ./azurefile-dockervolume-create.sh
 }

+refresh_package_index() {
+    # refresh package index
+    set +e
+    retries=30
+    while [ $retries -gt 0 ]; do
+        if [ $1 == "ubuntuserver" ]; then
+            apt-get update
+        elif [[ $1 == centos* ]] || [[ $1 == "rhel" ]] || [[ $1 == "oracle-linux" ]]; then
+            yum makecache -y fast
+        elif [[ $1 == opensuse* ]] || [[ $1 == sles* ]]; then
+            zypper -n --gpg-auto-import-keys ref
+        fi
+        if [ $? -eq 0 ]; then
+            break
+        fi
+        let retries=retries-1
+        if [ $retries -eq 0 ]; then
+            echo "Could not update package index"
+            exit 1
+        fi
+        sleep 1
+    done
+    set -e
+}
+
 # check sdb1 mount
 check_for_buggy_ntfs_mount
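Editor's note: `refresh_package_index` wraps the distro-specific index update in a retry loop so a transient mirror or network failure no longer aborts node preparation outright. The same retry shape, sketched in Python rather than bash for a self-contained illustration (the command list in the usage comment is hypothetical):

    import subprocess
    import sys
    import time

    def refresh_package_index(cmd, retries=30, delay=1):
        # retry a flaky command a bounded number of times, then fail hard
        while retries > 0:
            if subprocess.call(cmd) == 0:
                return
            retries -= 1
            time.sleep(delay)
        sys.exit('Could not update package index')

    # e.g. refresh_package_index(['apt-get', 'update'])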
@@ -304,7 +329,7 @@ if [ $offer == "ubuntuserver" ] || [ $offer == "debian" ]; then
         fi
     fi
     # refresh package index
-    apt-get update
+    refresh_package_index $offer
     # install required software first
     apt-get install -y -q -o Dpkg::Options::="--force-confnew" --no-install-recommends \
         apt-transport-https ca-certificates curl software-properties-common
@@ -331,7 +356,7 @@ if [ $offer == "ubuntuserver" ] || [ $offer == "debian" ]; then
         # add repo
         add-apt-repository "deb [arch=amd64] $repo $(lsb_release -cs) stable"
         # refresh index
-        apt-get update
+        refresh_package_index $offer
         # ensure docker opts service modifications are idempotent
         set +e
         grep '^DOCKER_OPTS=' /etc/default/docker
@@ -435,7 +460,7 @@ EOF
         set -e
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ] && [ ! -f $nodeprepfinished ]; then
+    if [ $gluster_on_compute -eq 1 ] && [ ! -f $nodeprepfinished ]; then
         apt-get install -y -q --no-install-recommends glusterfs-server
         if [[ ! -z $gfsenable ]]; then
             $gfsenable
@@ -511,7 +536,7 @@ elif [[ $offer == centos* ]] || [[ $offer == "rhel" ]] || [[ $offer == "oracle-l
     # add docker repo to yum
     yum install -y yum-utils
     yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
-    yum makecache -y fast
+    refresh_package_index $offer
     yum install -y docker-ce-$dockerversion
     # modify docker opts
     mkdir -p /mnt/resource/docker-tmp
@@ -529,7 +554,7 @@ elif [[ $offer == centos* ]] || [[ $offer == "rhel" ]] || [[ $offer == "oracle-l
         install_azurefile_docker_volume_driver $offer $sku
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ] && [ ! -f $nodeprepfinished ]; then
+    if [ $gluster_on_compute -eq 1 ] && [ ! -f $nodeprepfinished ]; then
         yum install -y epel-release centos-release-gluster38
         sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/CentOS-Gluster-3.8.repo
         yum install -y --enablerepo=centos-gluster38,epel glusterfs-server
@@ -586,7 +611,6 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         fi
         # add container repo for zypper
         zypper addrepo http://download.opensuse.org/repositories/Virtualization:containers/$repodir/Virtualization:containers.repo
-        zypper -n --gpg-auto-import-keys ref
     elif [[ $offer == sles* ]]; then
         dockerversion=1.12.6-90.1
         if [[ $sku == "12-sp1" ]]; then
@@ -596,12 +620,13 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         fi
         # enable container module
         SUSEConnect -p sle-module-containers/12/x86_64 -r ''
-        zypper ref
     fi
     if [ -z $repodir ]; then
         echo "unsupported sku: $sku for offer: $offer"
         exit 1
     fi
+    # update index
+    refresh_package_index $offer
     # install docker engine
     zypper -n in docker-$dockerversion
     # modify docker opts, docker opts in /etc/sysconfig/docker
@@ -618,7 +643,7 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         install_azurefile_docker_volume_driver $offer $sku
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ]; then
+    if [ $gluster_on_compute -eq 1 ]; then
         zypper addrepo http://download.opensuse.org/repositories/filesystems/$repodir/filesystems.repo
         zypper -n --gpg-auto-import-keys ref
         zypper -n in glusterfs

View file

@@ -1078,6 +1078,8 @@ def pool_list(ctx):


 @pool.command('del')
+@click.option(
+    '--poolid', help='Delete the specified pool')
 @click.option(
     '--wait', is_flag=True, help='Wait for pool deletion to complete')
 @common_options
@@ -1085,12 +1087,12 @@ def pool_list(ctx):
 @keyvault_options
 @aad_options
 @pass_cli_context
-def pool_del(ctx, wait):
+def pool_del(ctx, poolid, wait):
     """Delete a pool from the Batch account"""
     ctx.initialize_for_batch()
     convoy.fleet.action_pool_delete(
         ctx.batch_client, ctx.blob_client, ctx.queue_client,
-        ctx.table_client, ctx.config, wait=wait)
+        ctx.table_client, ctx.config, pool_id=poolid, wait=wait)


 @pool.command('resize')
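Editor's note: to see the decorator wiring above in isolation, here is a minimal runnable click program with the same option shape (click==6.7 is already pinned in requirements.txt; the echo body stands in for the real action call):

    import click

    @click.command('del')
    @click.option('--poolid', help='Delete the specified pool')
    @click.option('--wait', is_flag=True, help='Wait for pool deletion to complete')
    def pool_del(poolid, wait):
        # poolid is None when --poolid is omitted, which lets the lower
        # layers fall back to the pool id in the configuration file
        click.echo('delete pool={} wait={}'.format(poolid, wait))

    if __name__ == '__main__':
        pool_del()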