Fix suspend/start issues with software raid

- Disallow expand action with mdadm-based arrays on RAID-0
- Change "remotefs" to "fs" for commands
This commit is contained in:
Fred Park 2017-03-07 19:51:01 -08:00
Родитель 748cf64bfb
Коммит 587ab7faa4
7 изменённых файлов: 220 добавлений и 194 удалений

Просмотреть файл

@ -40,8 +40,8 @@
"vm_disk_map": {
"0": {
"disk_array": ["disk0", "disk1"],
"format_as": "btrfs",
"raid_type": 0
"filesystem": "btrfs",
"raid_level": 0
}
}
}

Просмотреть файл

@ -189,16 +189,16 @@ def _adjust_general_settings(config):
settings.set_batch_shipyard_encryption_enabled(config, False)
def _populate_global_settings(config, remotefs_context):
def _populate_global_settings(config, fs_context):
# type: (dict, bool) -> None
"""Populate global settings from config
:param dict config: configuration dict
:param bool remotefs_context: only initialize storage clients
:param bool fs_context: only initialize storage clients
"""
bs = settings.batch_shipyard_settings(config)
sc = settings.credentials_storage(config, bs.storage_account_settings)
bc = settings.credentials_batch(config)
if remotefs_context:
if fs_context:
rfs = settings.remotefs_settings(config)
postfix = rfs.storage_cluster.id
else:
@ -213,16 +213,16 @@ def _populate_global_settings(config, remotefs_context):
bs.generated_sas_expiry_days)
def _create_clients(config, remotefs_context):
def _create_clients(config, fs_context):
# type: (dict, bool) -> tuple
"""Create authenticated clients
:param dict config: configuration dict
:param bool remotefs_context: only initialize storage clients
:param bool fs_context: only initialize storage clients
:rtype: tuple
:return: (batch client, blob client, queue client, table client)
"""
bc = settings.credentials_batch(config)
if remotefs_context:
if fs_context:
batch_client = None
else:
credentials = batchauth.SharedKeyCredentials(
@ -247,12 +247,12 @@ def create_keyvault_client(ctx, config):
return keyvault.create_client(ctx, kv.aad)
def create_remotefs_clients(ctx, config):
def create_fs_clients(ctx, config):
# type: (CliContext, dict) ->
# Tuple[azure.mgmt.resource.resources.ResourceManagementClient,
# azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient]
"""Create clients needed for remotefs: client, network
"""Create clients needed for fs: resource management, compute, network
:param CliContext ctx: Cli Context
:param dict config: configuration dict
:rtype: tuple
@ -266,18 +266,18 @@ def create_remotefs_clients(ctx, config):
return remotefs.create_clients(ctx, mgmt.aad, subscription_id)
def initialize(config, remotefs_context=False):
def initialize(config, fs_context=False):
# type: (dict, bool) -> tuple
"""Initialize fleet and create authenticated clients
:param dict config: configuration dict
:param bool remotefs_context: only initialize storage clients
:param bool fs_context: only initialize storage clients
:rtype: tuple
:return: (batch client, blob client, queue client, table client)
"""
if not remotefs_context:
if not fs_context:
_adjust_general_settings(config)
_populate_global_settings(config, remotefs_context)
return _create_clients(config, remotefs_context)
_populate_global_settings(config, fs_context)
return _create_clients(config, fs_context)
def fetch_credentials_json_from_keyvault(
@ -1128,10 +1128,10 @@ def _adjust_settings_for_pool_creation(config):
pass
def action_remotefs_disks_add(resource_client, compute_client, config):
def action_fs_disks_add(resource_client, compute_client, config):
# type: (azure.mgmt.resource.resources.ResourceManagementClient,
# azure.mgmt.compute.ComputeManagementClient, dict) -> None
"""Action: Remotefs Disks Add
"""Action: Fs Disks Add
:param azure.mgmt.resource.resources.ResourceManagementClient
resource_client: resource client
:param azure.mgmt.compute.ComputeManagementClient compute_client:
@ -1141,10 +1141,10 @@ def action_remotefs_disks_add(resource_client, compute_client, config):
remotefs.create_managed_disks(resource_client, compute_client, config)
def action_remotefs_disks_del(compute_client, config, name, wait):
def action_fs_disks_del(compute_client, config, name, wait):
# type: (azure.mgmt.compute.ComputeManagementClient, dict, str,
# bool) -> None
"""Action: Remotefs Disks Del
"""Action: Fs Disks Del
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param dict config: configuration dict
@ -1154,9 +1154,9 @@ def action_remotefs_disks_del(compute_client, config, name, wait):
remotefs.delete_managed_disks(compute_client, config, name, wait)
def action_remotefs_disks_list(compute_client, config, restrict_scope):
def action_fs_disks_list(compute_client, config, restrict_scope):
# type: (azure.mgmt.compute.ComputeManagementClient, dict, bool) -> None
"""Action: Remotefs Disks List
"""Action: Fs Disks List
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param dict config: configuration dict
@ -1165,13 +1165,13 @@ def action_remotefs_disks_list(compute_client, config, restrict_scope):
remotefs.list_disks(compute_client, config, restrict_scope)
def action_remotefs_cluster_add(
def action_fs_cluster_add(
resource_client, compute_client, network_client, blob_client, config):
# type: (azure.mgmt.resource.resources.ResourceManagementClient,
# azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient,
# azure.storage.blob.BlockBlobService, dict) -> None
"""Action: Remotefs Cluster Add
"""Action: Fs Cluster Add
:param azure.mgmt.resource.resources.ResourceManagementClient
resource_client: resource client
:param azure.mgmt.compute.ComputeManagementClient compute_client:
@ -1187,7 +1187,7 @@ def action_remotefs_cluster_add(
_REMOTEFSPREP_FILE[0], [_REMOTEFSPREP_FILE, _REMOTEFSSTAT_FILE])
def action_remotefs_cluster_del(
def action_fs_cluster_del(
resource_client, compute_client, network_client, blob_client, config,
delete_all_resources, delete_data_disks, delete_virtual_network, wait):
# type: (azure.mgmt.resource.resources.ResourceManagementClient,
@ -1195,7 +1195,7 @@ def action_remotefs_cluster_del(
# azure.mgmt.network.NetworkManagementClient,
# azure.storage.blob.BlockBlobService, dict, bool, bool,
# bool, bool) -> None
"""Action: Remotefs Cluster Add
"""Action: Fs Cluster Add
:param azure.mgmt.resource.resources.ResourceManagementClient
resource_client: resource client
:param azure.mgmt.compute.ComputeManagementClient compute_client:
@ -1217,11 +1217,11 @@ def action_remotefs_cluster_del(
storage.delete_storage_containers_remotefs(blob_client, config)
def action_remotefs_cluster_expand(
def action_fs_cluster_expand(
compute_client, network_client, config, rebalance):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict, bool) -> None
"""Action: Remotefs Cluster Expand
"""Action: Fs Cluster Expand
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
@ -1232,12 +1232,12 @@ def action_remotefs_cluster_expand(
if remotefs.expand_storage_cluster(
compute_client, network_client, config, _REMOTEFSPREP_FILE[0],
rebalance):
action_remotefs_cluster_status(compute_client, network_client, config)
action_fs_cluster_status(compute_client, network_client, config)
def action_remotefs_cluster_suspend(compute_client, config, wait):
def action_fs_cluster_suspend(compute_client, config, wait):
# type: (azure.mgmt.compute.ComputeManagementClient, dict, bool) -> None
"""Action: Remotefs Cluster Suspend
"""Action: Fs Cluster Suspend
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param dict config: configuration dict
@ -1246,11 +1246,11 @@ def action_remotefs_cluster_suspend(compute_client, config, wait):
remotefs.suspend_storage_cluster(compute_client, config, wait)
def action_remotefs_cluster_start(
def action_fs_cluster_start(
compute_client, network_client, config, wait):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict, bool) -> None
"""Action: Remotefs Cluster Start
"""Action: Fs Cluster Start
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
@ -1260,13 +1260,13 @@ def action_remotefs_cluster_start(
"""
remotefs.start_storage_cluster(compute_client, config, wait)
if wait:
action_remotefs_cluster_status(compute_client, network_client, config)
action_fs_cluster_status(compute_client, network_client, config)
def action_remotefs_cluster_status(compute_client, network_client, config):
def action_fs_cluster_status(compute_client, network_client, config):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict) -> None
"""Action: Remotefs Cluster Status
"""Action: Fs Cluster Status
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
@ -1277,12 +1277,12 @@ def action_remotefs_cluster_status(compute_client, network_client, config):
compute_client, network_client, config, _REMOTEFSSTAT_FILE[0])
def action_remotefs_cluster_ssh(
def action_fs_cluster_ssh(
compute_client, network_client, config, cardinal, hostname):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict, int,
# str) -> None
"""Action: Remotefs Cluster Ssh
"""Action: Fs Cluster Ssh
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:

Просмотреть файл

@ -647,14 +647,14 @@ def _create_virtual_machine_extension(
'commandToExecute': './{bsf} {f}{m}{n}{p}{r}{s}'.format(
bsf=bootstrap_file,
f=' -f {}'.format(
rfs.storage_cluster.vm_disk_map[offset].format_as),
rfs.storage_cluster.vm_disk_map[offset].filesystem),
m=' -m {}'.format(
rfs.storage_cluster.file_server.mountpoint),
n=' -n' if settings.can_tune_tcp(
rfs.storage_cluster.vm_size) else '',
p=' -p' if premium else '',
r=' -r {}'.format(
rfs.storage_cluster.vm_disk_map[offset].raid_type),
rfs.storage_cluster.vm_disk_map[offset].raid_level),
s=' -s {}'.format(rfs.storage_cluster.file_server.type),
),
'storageAccountName': storage.get_storageaccount(),
@ -1179,13 +1179,6 @@ def delete_storage_cluster(
os_disk_async_ops.extend(delete_managed_disks(
compute_client, config, os_disk, wait=False,
confirm_override=True))
# delete data disks
data_disk_async_ops = []
for key in resources:
data_disks = resources[key]['data_disks']
if len(data_disks) > 0:
data_disk_async_ops.extend(delete_managed_disks(
compute_client, config, data_disks, wait=False))
# delete availability set
deleted = set()
as_async_ops = []
@ -1202,6 +1195,14 @@ def delete_storage_cluster(
nic = resources[key]['nic']
async_ops.append(_delete_network_interface(
network_client, rfs.resource_group, nic))
# delete data disks (delay from vm due to potential in use errors)
data_disk_async_ops = []
for key in resources:
data_disks = resources[key]['data_disks']
if len(data_disks) > 0:
data_disk_async_ops.extend(delete_managed_disks(
compute_client, config, data_disks, wait=False))
# wait for nics to delete
logger.debug('waiting for network interfaces to delete')
for op in async_ops:
op.result()
@ -1325,6 +1326,12 @@ def expand_storage_cluster(
vms = {}
new_disk_count = 0
for i in range(rfs.storage_cluster.vm_count):
# check if this vm filesystem supports expanding
if (rfs.storage_cluster.vm_disk_map[i].filesystem != 'btrfs' and
rfs.storage_cluster.vm_disk_map[i].raid_level == 0):
raise RuntimeError(
'Cannot expand mdadm-based RAID-0 volumes. Please re-create '
'your storage cluster with btrfs using new disks.')
vm_name = '{}-vm{}'.format(rfs.storage_cluster.hostname_prefix, i)
try:
vm = compute_client.virtual_machines.get(
@ -1363,12 +1370,13 @@ def expand_storage_cluster(
new_disk_count += 1
# check for proper raid setting and number of disks
pe_len = len(entry['pe_disks']['names'])
if pe_len <= 1 or rfs.storage_cluster.vm_disk_map[i].raid_type != 0:
if pe_len <= 1 or rfs.storage_cluster.vm_disk_map[i].raid_level != 0:
raise RuntimeError(
'Cannot expand array from {} disk(s) or RAID level {}'.format(
pe_len, rfs.storage_cluster.vm_disk_map[i].raid_type))
pe_len, rfs.storage_cluster.vm_disk_map[i].raid_level))
# add vm to map
vms[i] = entry
# check early return conditions
if len(vms) == 0:
logger.warning(
'no virtual machines to expand in storage cluster {}'.format(
@ -1432,12 +1440,12 @@ def expand_storage_cluster(
a=' -a',
b=' -b' if rebalance else '',
f=' -f {}'.format(
rfs.storage_cluster.vm_disk_map[offset].format_as),
rfs.storage_cluster.vm_disk_map[offset].filesystem),
m=' -m {}'.format(
rfs.storage_cluster.file_server.mountpoint),
p=' -p' if premium else '',
r=' -r {}'.format(
rfs.storage_cluster.vm_disk_map[offset].raid_type),
rfs.storage_cluster.vm_disk_map[offset].raid_level),
s=' -s {}'.format(rfs.storage_cluster.file_server.type),
)
ssh_priv_key, port, username, ip = _get_ssh_info(
@ -1676,7 +1684,7 @@ def stat_storage_cluster(
m=' -m {}'.format(
rfs.storage_cluster.file_server.mountpoint),
r=' -r {}'.format(
rfs.storage_cluster.vm_disk_map[offset].raid_type),
rfs.storage_cluster.vm_disk_map[offset].raid_level),
s=' -s {}'.format(rfs.storage_cluster.file_server.type),
)
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',

Просмотреть файл

@ -189,7 +189,7 @@ NetworkSecuritySettings = collections.namedtuple(
)
MappedVmDiskSettings = collections.namedtuple(
'MappedVmDiskSettings', [
'disk_array', 'format_as', 'raid_type'
'disk_array', 'filesystem', 'raid_level'
]
)
StorageClusterSettings = collections.namedtuple(
@ -2260,20 +2260,22 @@ def remotefs_settings(config):
disk_array, vmkey, _disk_set))
if len(disk_array) == 1:
# disable raid
raid_type = -1
raid_level = -1
else:
raid_type = conf[vmkey]['raid_type']
if raid_type == 0 and len(disk_array) < 2:
raid_level = conf[vmkey]['raid_level']
if raid_level == 0 and len(disk_array) < 2:
raise ValueError('RAID-0 arrays require at least two disks')
if raid_type != 0:
raise ValueError('Unsupported RAID level {}'.format(raid_type))
format_as = conf[vmkey]['format_as']
if format_as != 'btrfs' and not format_as.startswith('ext'):
raise ValueError('Unsupported format as type {}'.format(format_as))
if raid_level != 0:
raise ValueError('Unsupported RAID level {}'.format(
raid_level))
filesystem = conf[vmkey]['filesystem']
if filesystem != 'btrfs' and not filesystem.startswith('ext'):
raise ValueError('Unsupported filesystem type {}'.format(
filesystem))
disk_map[int(vmkey)] = MappedVmDiskSettings(
disk_array=disk_array,
format_as=conf[vmkey]['format_as'],
raid_type=raid_type,
filesystem=conf[vmkey]['filesystem'],
raid_level=raid_level,
)
# check disk map against vm_count
if len(disk_map) != sc_vm_count:

Просмотреть файл

@ -8,12 +8,12 @@ DEBIAN_FRONTEND=noninteractive
# vars
attach_disks=0
rebalance=0
format_as=
filesystem=
server_type=
mountpath=
optimize_tcp=0
premium_storage=0
raid_type=-1
raid_level=-1
# begin processing
while getopts "h?abf:m:npr:s:" opt; do
@ -23,11 +23,11 @@ while getopts "h?abf:m:npr:s:" opt; do
echo ""
echo "-a attach mode"
echo "-b rebalance filesystem on resize"
echo "-f [format as] format as"
echo "-f [filesystem] filesystem"
echo "-m [mountpoint] mountpoint"
echo "-n Tune TCP parameters"
echo "-p premium storage disks"
echo "-r [raid type] raid type"
echo "-r [RAID level] RAID level"
echo "-s [server type] server type"
echo ""
exit 1
@ -39,7 +39,7 @@ while getopts "h?abf:m:npr:s:" opt; do
rebalance=1
;;
f)
format_as=${OPTARG,,}
filesystem=${OPTARG,,}
;;
m)
mountpath=$OPTARG
@ -51,7 +51,7 @@ while getopts "h?abf:m:npr:s:" opt; do
premium_storage=1
;;
r)
raid_type=$OPTARG
raid_level=$OPTARG
;;
s)
server_type=${OPTARG,,}
@ -64,11 +64,11 @@ shift $((OPTIND-1))
echo "Parameters:"
echo " Attach mode: $attach_disks"
echo " Rebalance filesystem: $rebalance"
echo " Format as: $format_as"
echo " Filesystem: $filesystem"
echo " Mountpath: $mountpath"
echo " Tune TCP parameters: $optimize_tcp"
echo " Premium storage: $premium_storage"
echo " RAID type: $raid_type"
echo " RAID level: $raid_level"
echo " Server type: $server_type"
# first start prep
@ -164,24 +164,44 @@ fi
# check if disks are already in raid set
raid_resized=0
if [ $raid_type -ge 0 ]; then
if [ $raid_level -ge 0 ]; then
format_target=0
if [ $format_as == "btrfs" ]; then
if [ $raid_type -ne 0 ]; then
md_preexist=0
if [ $filesystem == "btrfs" ]; then
if [ $raid_level -ne 0 ]; then
echo "btrfs with non-RAID 0 is not supported."
exit 1
fi
else
target=/dev/md0
# find any pre-existing targets
set +e
mdadm --detail --scan
if [ $? -eq 0 ]; then
target=($(find /dev/md* -maxdepth 0 -type b))
if [ ${#target[@]} -ne 0 ]; then
target=${target[0]}
md_preexist=1
echo "Existing array found: $target"
# refresh target uuid to md target
read target_uuid < <(blkid ${target} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
else
echo "No pre-existing md target could be found"
fi
fi
set -e
if [ -z $target ]; then
target=/dev/md0
echo "Setting default target: $target"
fi
fi
declare -a raid_array
declare -a all_raid_disks
set +e
for disk in "${data_disks[@]}"; do
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
btrfs device scan "${disk}1"
else
mdadm --detail "${disk}1"
mdadm --examine "${disk}1"
fi
if [ $? -ne 0 ]; then
raid_array=("${raid_array[@]}" "${disk}1")
@ -195,42 +215,38 @@ if [ $raid_type -ge 0 ]; then
echo "No disks require RAID setup"
elif [ $no_raid_count -eq $numdisks ]; then
echo "$numdisks data disks require RAID setup: ${raid_array[@]}"
if [ $format_as == "btrfs" ]; then
if [ $raid_type -eq 0 ]; then
if [ $filesystem == "btrfs" ]; then
if [ $raid_level -eq 0 ]; then
mkfs.btrfs -d raid0 ${raid_array[@]}
else
mkfs.btrfs -m raid${raid_type} ${raid_array[@]}
mkfs.btrfs -m raid${raid_level} ${raid_array[@]}
fi
else
set +e
# first check if this is a pre-existing array
mdadm --detail --scan
if [ $? -eq 0 ]; then
target=($(find /dev/md* -maxdepth 0 -type b))
if [ ${#target[@]} -ne 1 ]; then
mdadm_detail=$(mdadm --detail --scan)
if [ -z $mdadm_detail ]; then
set -e
mdadm --create --verbose $target --level=$raid_level --raid-devices=$numdisks ${raid_array[@]}
format_target=1
else
if [ $md_preexist -eq 0 ]; then
echo "Could not determine pre-existing md target"
exit 1
fi
target=${target[0]}
echo "Existing array found: $target"
# refresh target uuid to md target
read target_uuid < <(blkid ${target} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
else
set -e
mdadm --create --verbose $target --level=$raid_type --raid-devices=$numdisks ${raid_array[@]}
format_target=1
echo "Not creating a new array since pre-existing md target found: $target"
fi
set -e
fi
else
echo "Mismatch of non-RAID disks $no_raid_count to total disks $numdisks."
echo "Will resize underlying RAID array with devices: ${raid_array[@]}"
if [ $raid_type -ne 0 ]; then
echo "Cannot resize with RAID type of $raid_type."
if [ $raid_level -ne 0 ]; then
echo "Cannot resize with RAID level of $raid_level."
exit 1
fi
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
# add new block devices first
echo "Adding devices ${raid_array[@]} to $mountpath"
btrfs device add ${raid_array[@]} $mountpath
# resize btrfs volume
echo "Resizing filesystem at $mountpath."
@ -244,14 +260,16 @@ if [ $raid_type -ge 0 ]; then
raid_resized=0
else
# add new block device first
mdadm --add --verbose $target ${raid_array[@]}
echo "Adding devices ${raid_array[@]} to $target"
mdadm --add $target ${raid_array[@]}
# grow the array
mdadm --grow --verbose $target --raid-devices=$numdisks
echo "Growing array $target to a total of $numdisks devices"
mdadm --grow --raid-devices=$numdisks $target
raid_resized=1
fi
fi
# dump diagnostic info
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
btrfs filesystem show
else
cat /proc/mdstat
@ -270,12 +288,12 @@ if [ $format_target -eq 1 ]; then
exit 1
fi
echo "Creating filesystem on $target."
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
mkfs.btrfs $target
elif [[ $format_as == ext* ]]; then
mkfs.${format_as} -m 0 $target
elif [[ $filesystem == ext* ]]; then
mkfs.${filesystem} -m 0 $target
else
echo "Unknown format as: $format_as"
echo "Unknown filesystem: $filesystem"
exit 1
fi
# refresh target uuid
@ -311,7 +329,7 @@ if [ $attach_disks -eq 0 ]; then
echo "Adding $target_uuid to mountpoint $mountpath to /etc/fstab"
if [ $premium_storage -eq 1 ]; then
# disable barriers due to RO cache
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
mo=",nobarrier"
else
mo=",barrier=0"
@ -320,7 +338,7 @@ if [ $attach_disks -eq 0 ]; then
# enable discard to save cost on standard storage
mo=",discard"
fi
echo "UUID=$target_uuid $mountpath $format_as defaults,noatime${mo} 0 2" >> /etc/fstab
echo "UUID=$target_uuid $mountpath $filesystem defaults,noatime${mo} 0 2" >> /etc/fstab
fi
# create mountpath
mkdir -p $mountpath
@ -337,12 +355,12 @@ fi
# grow underlying filesystem if required
if [ $raid_resized -eq 1 ]; then
echo "Resizing filesystem at $mountpath."
if [ $format_as == "btrfs" ]; then
if [ $filesystem == "btrfs" ]; then
btrfs filesystem resize max $mountpath
elif [[ $format_as == ext* ]]; then
elif [[ $filesystem == ext* ]]; then
resize2fs $mountpath
else
echo "Unknown format as: $format_as"
echo "Unknown filesystem: $filesystem"
exit 1
fi
fi

Просмотреть файл

@ -6,7 +6,7 @@ DEBIAN_FRONTEND=noninteractive
# vars
mountpath=
raid_type=-1
raid_level=-1
server_type=
# begin processing
@ -16,7 +16,7 @@ while getopts "h?m:r:s:" opt; do
echo "shipyard_remotefs_stat.sh parameters"
echo ""
echo "-m [mountpoint] mountpoint"
echo "-r [raid type] raid type"
echo "-r [RAID level] RAID level"
echo "-s [server type] server type"
echo ""
exit 1
@ -25,7 +25,7 @@ while getopts "h?m:r:s:" opt; do
mountpath=$OPTARG
;;
r)
raid_type=$OPTARG
raid_level=$OPTARG
;;
s)
server_type=${OPTARG,,}
@ -78,7 +78,7 @@ echo ""
formatted_as=$(echo $mount | cut -d" " -f5)
# get raid status
if [ $raid_type -ge 0 ]; then
if [ $raid_level -ge 0 ]; then
if [ $formatted_as == "btrfs" ]; then
echo "btrfs device status:"
for disk in "${data_disks[@]}"; do

Просмотреть файл

@ -79,19 +79,19 @@ class CliContext(object):
# management options
self.subscription_id = None
def initialize_for_remotefs(self):
def initialize_for_fs(self):
# type: (CliContext) -> None
"""Initialize context for remotefs commands
"""Initialize context for fs commands
:param CliContext self: this
"""
self._read_credentials_config()
self.resource_client, self.compute_client, self.network_client = \
convoy.fleet.create_remotefs_clients(self, self.config)
convoy.fleet.create_fs_clients(self, self.config)
self._cleanup_during_initialize()
self._init_config(
skip_global_config=False, skip_pool_config=True,
skip_remotefs_config=False)
clients = convoy.fleet.initialize(self.config, remotefs_context=True)
skip_fs_config=False)
clients = convoy.fleet.initialize(self.config, fs_context=True)
self._set_clients(*clients)
def initialize_for_storage(self):
@ -113,7 +113,7 @@ class CliContext(object):
self._cleanup_during_initialize()
self._init_config(
skip_global_config=True, skip_pool_config=True,
skip_remotefs_config=True)
skip_fs_config=True)
def initialize_for_batch(self):
# type: (CliContext) -> None
@ -126,7 +126,7 @@ class CliContext(object):
self._cleanup_during_initialize()
self._init_config(
skip_global_config=False, skip_pool_config=False,
skip_remotefs_config=True)
skip_fs_config=True)
clients = convoy.fleet.initialize(self.config)
self._set_clients(*clients)
@ -183,13 +183,13 @@ class CliContext(object):
def _init_config(
self, skip_global_config=False, skip_pool_config=False,
skip_remotefs_config=False):
skip_fs_config=False):
# type: (CliContext, bool, bool, bool) -> None
"""Initializes configuration of the context
:param CliContext self: this
:param bool skip_global_config: skip global config
:param bool skip_pool_config: skip pool config
:param bool skip_remotefs_config: skip remote fs config
:param bool skip_fs_config: skip remote fs config
"""
# use configdir if available
if self.configdir is not None:
@ -204,10 +204,10 @@ class CliContext(object):
self.json_pool = pathlib.Path(self.configdir, 'pool.json')
if self.json_jobs is None:
self.json_jobs = pathlib.Path(self.configdir, 'jobs.json')
if not skip_remotefs_config:
if self.json_remotefs is None:
self.json_remotefs = pathlib.Path(
self.configdir, 'remotefs.json')
if not skip_fs_config:
if self.json_fs is None:
self.json_fs = pathlib.Path(
self.configdir, 'fs.json')
# check for required json files
if (self.json_credentials is not None and
not isinstance(self.json_credentials, pathlib.Path)):
@ -222,11 +222,11 @@ class CliContext(object):
raise ValueError('pool json was not specified')
elif not isinstance(self.json_pool, pathlib.Path):
self.json_pool = pathlib.Path(self.json_pool)
if not skip_remotefs_config:
if self.json_remotefs is None:
raise ValueError('remotefs json was not specified')
elif not isinstance(self.json_remotefs, pathlib.Path):
self.json_remotefs = pathlib.Path(self.json_remotefs)
if not skip_fs_config:
if self.json_fs is None:
raise ValueError('fs json was not specified')
elif not isinstance(self.json_fs, pathlib.Path):
self.json_fs = pathlib.Path(self.json_fs)
# fetch credentials from keyvault, if json file is missing
kvcreds = None
if self.json_credentials is None or not self.json_credentials.exists():
@ -265,8 +265,8 @@ class CliContext(object):
# read rest of config files
if not skip_global_config:
self._read_json_file(self.json_config)
if not skip_remotefs_config:
self._read_json_file(self.json_remotefs)
if not skip_fs_config:
self._read_json_file(self.json_fs)
if not skip_pool_config:
self._read_json_file(self.json_pool)
if self.json_jobs is not None:
@ -552,16 +552,16 @@ def _jobs_option(f):
callback=callback)(f)
def _remotefs_option(f):
def _fs_option(f):
def callback(ctx, param, value):
clictx = ctx.ensure_object(CliContext)
clictx.json_remotefs = value
clictx.json_fs = value
return value
return click.option(
'--remotefs',
'--fs',
expose_value=False,
envvar='SHIPYARD_REMOTEFS_JSON',
help='RemoteFS json config file',
envvar='SHIPYARD_FS_JSON',
help='Filesystem json config file',
callback=callback)(f)
@ -600,10 +600,10 @@ def keyvault_options(f):
return f
def remotefs_options(f):
def fs_options(f):
f = aad_options(f)
f = _azure_management_subscription_id_option(f)
f = _remotefs_option(f)
f = _fs_option(f)
return f
@ -617,26 +617,26 @@ def cli(ctx):
@cli.group()
@pass_cli_context
def remotefs(ctx):
"""Remote Filesystem actions"""
def fs(ctx):
"""Filesystem in Azure actions"""
pass
@remotefs.group()
@fs.group()
@pass_cli_context
def cluster(ctx):
"""Storage cluster actions"""
"""Filesystem storage cluster in Azure actions"""
pass
@cluster.command('add')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_add(ctx):
"""Create a storage cluster for a Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_add(
def fs_add(ctx):
"""Create a filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_add(
ctx.resource_client, ctx.compute_client, ctx.network_client,
ctx.blob_client, ctx.config)
@ -653,14 +653,14 @@ def remotefs_add(ctx):
@click.option(
'--wait', is_flag=True, help='Wait for deletion to complete')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_del(
def fs_del(
ctx, delete_all_resources, delete_data_disks, delete_virtual_network,
wait):
"""Delete a storage cluster used for a Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_del(
"""Delete a filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_del(
ctx.resource_client, ctx.compute_client, ctx.network_client,
ctx.blob_client, ctx.config, delete_all_resources, delete_data_disks,
delete_virtual_network, wait)
@ -670,12 +670,12 @@ def remotefs_del(
@click.option(
'--rebalance', is_flag=True, help='Rebalance filesystem, if applicable')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_expand(ctx, rebalance):
"""Expand a storage cluster used for a Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_expand(
def fs_expand(ctx, rebalance):
"""Expand a filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_expand(
ctx.compute_client, ctx.network_client, ctx.config, rebalance)
@ -683,12 +683,12 @@ def remotefs_expand(ctx, rebalance):
@click.option(
'--wait', is_flag=True, help='Wait for suspension to complete')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_suspend(ctx, wait):
"""Suspend a storage cluster used for a Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_suspend(
def fs_suspend(ctx, wait):
"""Suspend a filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_suspend(
ctx.compute_client, ctx.config, wait)
@ -696,25 +696,23 @@ def remotefs_suspend(ctx, wait):
@click.option(
'--wait', is_flag=True, help='Wait for restart to complete')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_start(ctx, wait):
"""Starts a previously suspended storage cluster used for a Remote
Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_start(
def fs_start(ctx, wait):
"""Starts a previously suspended filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_start(
ctx.compute_client, ctx.network_client, ctx.config, wait)
@cluster.command('status')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_status(ctx):
"""Query status of a storage cluster used for a Remote
Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_status(
def fs_status(ctx):
"""Query status of a filesystem storage cluster in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_status(
ctx.compute_client, ctx.network_client, ctx.config)
@ -726,17 +724,17 @@ def remotefs_status(ctx):
@click.option(
'--hostname', help='Hostname of remote fs vm to connect to')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_ssh(ctx, cardinal, hostname):
"""Interactively login via SSH to a virtual machine in the Remote
Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_ssh(
def fs_ssh(ctx, cardinal, hostname):
"""Interactively login via SSH to a filesystem storage cluster virtual
machine in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_cluster_ssh(
ctx.compute_client, ctx.network_client, ctx.config, cardinal, hostname)
@remotefs.group()
@fs.group()
@pass_cli_context
def disks(ctx):
"""Managed disk actions"""
@ -745,12 +743,12 @@ def disks(ctx):
@disks.command('add')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_disks_add(ctx):
"""Create managed disks for Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_disks_add(
def fs_disks_add(ctx):
"""Create managed disks in Azure"""
ctx.initialize_for_fs()
convoy.fleet.action_fs_disks_add(
ctx.resource_client, ctx.compute_client, ctx.config)
@ -760,12 +758,12 @@ def remotefs_disks_add(ctx):
@click.option(
'--wait', is_flag=True, help='Wait for disk deletion to complete')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_disks_del(ctx, name, wait):
def fs_disks_del(ctx, name, wait):
"""Delete managed disks in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_disks_del(
ctx.initialize_for_fs()
convoy.fleet.action_fs_disks_del(
ctx.compute_client, ctx.config, name, wait)
@ -774,12 +772,12 @@ def remotefs_disks_del(ctx, name, wait):
'--restrict-scope', is_flag=True,
help='List disks present only in configuration if they exist')
@common_options
@remotefs_options
@fs_options
@pass_cli_context
def remotefs_disks_list(ctx, restrict_scope):
def fs_disks_list(ctx, restrict_scope):
"""List managed disks in resource group"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_disks_list(
ctx.initialize_for_fs()
convoy.fleet.action_fs_disks_list(
ctx.compute_client, ctx.config, restrict_scope)