Various fixes
- Add poolid param for pool del
- Fix vm_count deprecation check on fs actions
- Improve robustness of package index updates
- Prompt for jobs cmi action
- Update to latest dependencies
Parent: 06fd4f8e62
Commit: 3a5fb452d5
CHANGELOG.md (11 changes)
@@ -2,6 +2,17 @@
 
 ## [Unreleased]
+### Added
+- `--poolid` parameter for `pool del` to specify a specific pool to delete
+
+### Changed
+- Prompt for confirmation for `jobs cmi`
+- Updated to latest dependencies
+
+### Fixed
+- Remote FS allocation issue with `vm_count` deprecation check
+- Better handling of package index refresh errors
 
 ## [2.7.0rc1] - 2017-05-24
 ### Added
 - `pool listimages` command which will list all common Docker images on
@@ -680,16 +680,19 @@ def resize_pool(batch_client, config, wait=False):
         addl_end_states=[batchmodels.ComputeNodeState.running])
 
 
-def del_pool(batch_client, config):
-    # type: (azure.batch.batch_service_client.BatchServiceClient, dict) -> bool
+def del_pool(batch_client, config, pool_id=None):
+    # type: (azure.batch.batch_service_client.BatchServiceClient, dict,
+    #        str) -> bool
     """Delete a pool
     :param batch_client: The batch client to use.
     :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
     :param dict config: configuration dict
+    :param str pool_id: pool id
     :rtype: bool
     :return: if pool was deleted
     """
-    pool_id = settings.pool_id(config)
+    if util.is_none_or_empty(pool_id):
+        pool_id = settings.pool_id(config)
     if not util.confirm_action(
             config, 'delete {} pool'.format(pool_id)):
         return False
@@ -957,6 +960,9 @@ def clean_mi_jobs(batch_client, config):
     """
     for job in settings.job_specifications(config):
         job_id = settings.job_id(job)
+        if not util.confirm_action(
+                config, 'cleanup {} job'.format(job_id)):
+            continue
         cleanup_job_id = 'shipyardcleanup-' + job_id
         cleanup_job = batchmodels.JobAddParameter(
             id=cleanup_job_id,
@@ -202,13 +202,17 @@ def check_for_invalid_config(config):
             'migrate your Docker images to Azure Container Registry, '
             'Docker Hub (public or private), or any other Internet '
             'accessible Docker registry solution.')
-    if isinstance(config['pool_specification']['vm_count'], int):
-        logger.warning(
-            'DEPRECATION WARNING: pool_specification:vm_count is directly '
-            'set with an integral value for dedicated nodes. This '
-            'configuration will not be supported in future releases. '
-            'Please update your configuration to include a complex property '
-            'of dedicated and/or low_priority nodes for vm_count.')
+    try:
+        if isinstance(config['pool_specification']['vm_count'], int):
+            logger.warning(
+                'DEPRECATION WARNING: pool_specification:vm_count is '
+                'directly set with an integral value for dedicated nodes. '
+                'This configuration will not be supported in future '
+                'releases. Please update your configuration to include a '
+                'complex property of dedicated and/or low_priority nodes '
+                'for vm_count.')
+    except KeyError:
+        pass
 
 
 def populate_global_settings(config, fs_storage):
@@ -1939,10 +1943,10 @@ def action_pool_list(batch_client):
 
 def action_pool_delete(
         batch_client, blob_client, queue_client, table_client, config,
-        wait=False):
+        pool_id=None, wait=False):
     # type: (batchsc.BatchServiceClient, azureblob.BlockBlobService,
     #        azurequeue.QueueService, azuretable.TableService, dict,
-    #        bool) -> None
+    #        str, bool) -> None
     """Action: Pool Delete
     :param azure.batch.batch_service_client.BatchServiceClient batch_client:
         batch client
@@ -1950,20 +1954,22 @@ def action_pool_delete(
     :param azure.storage.queue.QueueService queue_client: queue client
     :param azure.storage.table.TableService table_client: table client
     :param dict config: configuration dict
+    :param str pool_id: pool id to delete
     :param bool wait: wait for pool to delete
     """
     deleted = False
     try:
-        deleted = batch.del_pool(batch_client, config)
+        deleted = batch.del_pool(batch_client, config, pool_id=pool_id)
     except batchmodels.BatchErrorException as ex:
         logger.exception(ex)
         if 'The specified pool does not exist' in ex.message.value:
             deleted = True
     if deleted:
         storage.cleanup_with_del_pool(
-            blob_client, queue_client, table_client, config)
+            blob_client, queue_client, table_client, config, pool_id=pool_id)
     if wait:
-        pool_id = settings.pool_id(config)
+        if util.is_none_or_empty(pool_id):
+            pool_id = settings.pool_id(config)
         logger.debug('waiting for pool {} to delete'.format(pool_id))
         while batch_client.pool.exists(pool_id):
             time.sleep(3)
@@ -767,7 +767,7 @@ def create_storage_cluster(
             config, 'create storage cluster {}'.format(sc_id)):
         return
     # create storage container
-    storage.create_storage_containers_remotefs(blob_client, config)
+    storage.create_storage_containers_remotefs(blob_client)
     # async operation dictionary
     async_ops = {}
     # create nsg
@@ -1848,7 +1848,7 @@ def delete_storage_cluster(
         logger.info('availability set {} deleted'.format(as_name))
         deleted.clear()
     # delete storage container
-    storage.delete_storage_containers_remotefs(blob_client, config)
+    storage.delete_storage_containers_remotefs(blob_client)
     # wait for all async ops to complete
     if wait:
         logger.debug('waiting for network security groups to delete')
@@ -493,38 +493,39 @@ def create_storage_containers(blob_client, queue_client, table_client, config):
             queue_client.create_queue(_STORAGE_CONTAINERS[key])
 
 
-def create_storage_containers_remotefs(blob_client, config):
-    # type: (azureblob.BlockBlobService, dict) -> None
+def create_storage_containers_remotefs(blob_client):
+    # type: (azureblob.BlockBlobService) -> None
     """Create storage containers used for remotefs
     :param azure.storage.blob.BlockBlobService blob_client: blob client
-    :param dict config: configuration dict
     """
     contname = _STORAGE_CONTAINERS['blob_remotefs']
     logger.info('creating container: {}'.format(contname))
     blob_client.create_container(contname)
 
 
-def delete_storage_containers_remotefs(blob_client, config):
-    # type: (azureblob.BlockBlobService, dict) -> None
+def delete_storage_containers_remotefs(blob_client):
+    # type: (azureblob.BlockBlobService) -> None
     """Delete storage containers used for remotefs
     :param azure.storage.blob.BlockBlobService blob_client: blob client
-    :param dict config: configuration dict
     """
     contname = _STORAGE_CONTAINERS['blob_remotefs']
     logger.info('deleting container: {}'.format(contname))
     blob_client.delete_container(contname)
 
 
-def cleanup_with_del_pool(blob_client, queue_client, table_client, config):
+def cleanup_with_del_pool(
+        blob_client, queue_client, table_client, config, pool_id=None):
     # type: (azureblob.BlockBlobService, azurequeue.QueueService,
-    #        azuretable.TableService, dict) -> None
+    #        azuretable.TableService, dict, str) -> None
     """Special cleanup routine in combination with delete pool
     :param azure.storage.blob.BlockBlobService blob_client: blob client
     :param azure.storage.queue.QueueService queue_client: queue client
     :param azure.storage.table.TableService table_client: table client
     :param dict config: configuration dict
+    :param str pool_id: pool id
     """
-    pool_id = settings.pool_id(config)
+    if util.is_none_or_empty(pool_id):
+        pool_id = settings.pool_id(config)
     if not util.confirm_action(
             config, 'delete/cleanup of Batch Shipyard metadata in storage '
             'containers associated with {} pool'.format(pool_id)):
@@ -445,6 +445,8 @@ Batch Shipyard. It is recommended to use this command instead of deleting
 a pool directly from the Azure Portal, Batch Labs, or other tools as
 this action can conveniently remove all associated Batch Shipyard metadata on
 Azure Storage.
+  * `--poolid` will delete the specified pool instead of the pool from the
+    pool configuration file
   * `--wait` will wait for deletion to complete
 * `delnode` will delete the specified node from the pool
 * `dsu` will delete the SSH user defined in the pool configuration file
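A usage sketch of the new flag (assuming the CLI entry point is invoked as `shipyard`; `mypool` is a placeholder pool id):

    # delete the pool named in the pool configuration file, waiting for completion
    shipyard pool del --wait
    # delete a specific pool by id, overriding the pool configuration file
    shipyard pool del --poolid mypool --wait

When `--poolid` is omitted, `del_pool` falls back to `settings.pool_id(config)`, as the hunks above show.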
@@ -2,14 +2,14 @@ adal==0.4.5
 azure-batch==3.0.0
 azure-keyvault==0.3.0
 azure-mgmt-batch==4.0.0
-azure-mgmt-compute==1.0.0rc1
-azure-mgmt-network==1.0.0rc3
-azure-mgmt-resource==1.1.0rc1
+azure-mgmt-compute==1.0.0
+azure-mgmt-network==1.0.0
+azure-mgmt-resource==1.1.0
 azure-storage==0.34.2
 blobxfer==0.12.1
 click==6.7
 future==0.16.0
-msrest==0.4.7
+msrest==0.4.8
 msrestazure==0.4.7
 pathlib2==2.2.1; python_version < '3.5'
 requests==2.14.2
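To pick up the pinned upgrades, the dependency set is reinstalled in the usual way (a typical invocation, run from the repository root; a virtual environment is assumed):

    pip install --upgrade -r requirements.txt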
@@ -10,7 +10,7 @@ block=
 cascadecontainer=0
 encrypted=
 hpnssh=0
-gluster=0
+gluster_on_compute=0
 gpu=
 networkopt=0
 offer=
@@ -32,7 +32,7 @@ while getopts "h?abde:fg:nm:o:p:r:s:t:v:wx:" opt; do
             echo "-b block until resources loaded"
             echo "-d use docker container for cascade"
             echo "-e [thumbprint] encrypted credentials with cert"
-            echo "-f set up glusterfs cluster"
+            echo "-f set up glusterfs on compute"
             echo "-g [nv-series:driver file:nvidia docker pkg] gpu support"
             echo "-m [type:scid] mount storage cluster"
             echo "-n optimize network TCP settings"
@@ -60,7 +60,7 @@ while getopts "h?abde:fg:nm:o:p:r:s:t:v:wx:" opt; do
             encrypted=${OPTARG,,}
             ;;
         f)
-            gluster=1
+            gluster_on_compute=1
             ;;
         g)
             gpu=$OPTARG
@@ -169,6 +169,31 @@ install_azurefile_docker_volume_driver() {
     ./azurefile-dockervolume-create.sh
 }
 
+refresh_package_index() {
+    # refresh package index
+    set +e
+    retries=30
+    while [ $retries -gt 0 ]; do
+        if [ $1 == "ubuntuserver" ]; then
+            apt-get update
+        elif [[ $1 == centos* ]] || [[ $1 == "rhel" ]] || [[ $1 == "oracle-linux" ]]; then
+            yum makecache -y fast
+        elif [[ $1 == opensuse* ]] || [[ $1 == sles* ]]; then
+            zypper -n --gpg-auto-import-keys ref
+        fi
+        if [ $? -eq 0 ]; then
+            break
+        fi
+        let retries=retries-1
+        if [ $retries -eq 0 ]; then
+            echo "Could not update package index"
+            exit 1
+        fi
+        sleep 1
+    done
+    set -e
+}
+
 # check sdb1 mount
 check_for_buggy_ntfs_mount
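The new helper retries the distribution-appropriate index refresh up to 30 times, pausing one second between attempts, and only fails node prep once all attempts are exhausted. The same bounded-retry shape generalizes to any transient command; a minimal illustrative sketch (`retry_cmd` is not part of this commit):

    # retry an arbitrary command up to 30 times with a 1s pause between
    # attempts; like refresh_package_index, tolerate failures inside the
    # loop (set +e) and restore fail-fast behavior afterward (set -e)
    retry_cmd() {
        local retries=30
        set +e
        while [ $retries -gt 0 ]; do
            "$@"
            if [ $? -eq 0 ]; then
                break
            fi
            retries=$((retries-1))
            if [ $retries -eq 0 ]; then
                echo "command failed after 30 attempts: $*"
                exit 1
            fi
            sleep 1
        done
        set -e
    }

    # example: retry_cmd apt-get update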
@@ -304,7 +329,7 @@ if [ $offer == "ubuntuserver" ] || [ $offer == "debian" ]; then
         fi
     fi
     # refresh package index
-    apt-get update
+    refresh_package_index $offer
     # install required software first
     apt-get install -y -q -o Dpkg::Options::="--force-confnew" --no-install-recommends \
         apt-transport-https ca-certificates curl software-properties-common
@@ -331,7 +356,7 @@ if [ $offer == "ubuntuserver" ] || [ $offer == "debian" ]; then
     # add repo
     add-apt-repository "deb [arch=amd64] $repo $(lsb_release -cs) stable"
     # refresh index
-    apt-get update
+    refresh_package_index $offer
     # ensure docker opts service modifications are idempotent
     set +e
     grep '^DOCKER_OPTS=' /etc/default/docker
@@ -435,7 +460,7 @@ EOF
         set -e
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ] && [ ! -f $nodeprepfinished ]; then
+    if [ $gluster_on_compute -eq 1 ] && [ ! -f $nodeprepfinished ]; then
         apt-get install -y -q --no-install-recommends glusterfs-server
         if [[ ! -z $gfsenable ]]; then
             $gfsenable
@@ -511,7 +536,7 @@ elif [[ $offer == centos* ]] || [[ $offer == "rhel" ]] || [[ $offer == "oracle-l
     # add docker repo to yum
     yum install -y yum-utils
     yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
-    yum makecache -y fast
+    refresh_package_index $offer
     yum install -y docker-ce-$dockerversion
     # modify docker opts
     mkdir -p /mnt/resource/docker-tmp
@@ -529,7 +554,7 @@ elif [[ $offer == centos* ]] || [[ $offer == "rhel" ]] || [[ $offer == "oracle-l
         install_azurefile_docker_volume_driver $offer $sku
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ] && [ ! -f $nodeprepfinished ]; then
+    if [ $gluster_on_compute -eq 1 ] && [ ! -f $nodeprepfinished ]; then
         yum install -y epel-release centos-release-gluster38
         sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/CentOS-Gluster-3.8.repo
         yum install -y --enablerepo=centos-gluster38,epel glusterfs-server
@@ -586,7 +611,6 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         fi
         # add container repo for zypper
         zypper addrepo http://download.opensuse.org/repositories/Virtualization:containers/$repodir/Virtualization:containers.repo
-        zypper -n --gpg-auto-import-keys ref
     elif [[ $offer == sles* ]]; then
         dockerversion=1.12.6-90.1
         if [[ $sku == "12-sp1" ]]; then
@@ -596,12 +620,13 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         fi
         # enable container module
         SUSEConnect -p sle-module-containers/12/x86_64 -r ''
-        zypper ref
     fi
     if [ -z $repodir ]; then
         echo "unsupported sku: $sku for offer: $offer"
         exit 1
     fi
+    # update index
+    refresh_package_index $offer
     # install docker engine
     zypper -n in docker-$dockerversion
     # modify docker opts, docker opts in /etc/sysconfig/docker
@@ -618,7 +643,7 @@ elif [[ $offer == opensuse* ]] || [[ $offer == sles* ]]; then
         install_azurefile_docker_volume_driver $offer $sku
     fi
     # set up glusterfs
-    if [ $gluster -eq 1 ]; then
+    if [ $gluster_on_compute -eq 1 ]; then
         zypper addrepo http://download.opensuse.org/repositories/filesystems/$repodir/filesystems.repo
         zypper -n --gpg-auto-import-keys ref
         zypper -n in glusterfs
@@ -1078,6 +1078,8 @@ def pool_list(ctx):
 
 
 @pool.command('del')
+@click.option(
+    '--poolid', help='Delete the specified pool')
 @click.option(
     '--wait', is_flag=True, help='Wait for pool deletion to complete')
 @common_options
@@ -1085,12 +1087,12 @@ def pool_list(ctx):
 @keyvault_options
 @aad_options
 @pass_cli_context
-def pool_del(ctx, wait):
+def pool_del(ctx, poolid, wait):
     """Delete a pool from the Batch account"""
     ctx.initialize_for_batch()
     convoy.fleet.action_pool_delete(
         ctx.batch_client, ctx.blob_client, ctx.queue_client,
-        ctx.table_client, ctx.config, wait=wait)
+        ctx.table_client, ctx.config, pool_id=poolid, wait=wait)
 
 
 @pool.command('resize')