Fix issues with ext4 + mdadm
Fred Park 2017-03-06 17:53:57 -08:00
Parent f8e3fa52ed
Commit a6a672a82e
7 changed files with 417 additions and 151 deletions

View file

@@ -4,6 +4,7 @@
### Changed
- All dependencies updated to latest versions
- Update Batch API call compatibility for `azure-batch 2.0.0`
- Precompile python files for Docker images
## [2.5.4] - 2017-03-08
### Changed

View file

@@ -1243,6 +1243,24 @@ def action_remotefs_cluster_del(
storage.delete_storage_containers_remotefs(blob_client, config)
def action_remotefs_cluster_expand(
compute_client, network_client, config, rebalance):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict, bool) -> None
"""Action: Remotefs Cluster Expand
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
network client
:param dict config: configuration dict
:param bool rebalance: rebalance filesystem
"""
if remotefs.expand_storage_cluster(
compute_client, network_client, config, _REMOTEFSPREP_FILE[0],
rebalance):
action_remotefs_cluster_status(compute_client, network_client, config)
def action_remotefs_cluster_suspend(compute_client, config, wait):
# type: (azure.mgmt.compute.ComputeManagementClient, dict, bool) -> None
"""Action: Remotefs Cluster Suspend
@@ -1268,8 +1286,7 @@ def action_remotefs_cluster_start(
"""
remotefs.start_storage_cluster(compute_client, config, wait)
if wait:
remotefs.stat_storage_cluster(
compute_client, network_client, config, _REMOTEFSSTAT_FILE[0])
action_remotefs_cluster_status(compute_client, network_client, config)
def action_remotefs_cluster_status(compute_client, network_client, config):

View file

@@ -784,11 +784,8 @@ def _create_virtual_machine_extension(
'fileUris': blob_urls,
},
protected_settings={
'commandToExecute': './{bsf} {b}{d}{f}{m}{n}{p}{r}{s}'.format(
'commandToExecute': './{bsf} {f}{m}{n}{p}{r}{s}'.format(
bsf=bootstrap_file,
b=' -b', # always allow rebalance on btrfs (for now)
d=' -d {}'.format(len(
rfs.storage_cluster.vm_disk_map[offset].disk_array)),
f=' -f {}'.format(
rfs.storage_cluster.vm_disk_map[offset].format_as),
m=' -m {}'.format(
@@ -860,6 +857,13 @@ def create_storage_cluster(
# check if cluster already exists
logger.debug('checking if storage cluster {} exists'.format(
rfs.storage_cluster.id))
# construct disk map
disk_map = {}
disk_ids = list_disks(compute_client, config, restrict_scope=True)
for disk_id, sat in disk_ids:
disk_map[disk_id.split('/')[-1]] = (disk_id, sat)
del disk_ids
# check vms
for i in range(rfs.storage_cluster.vm_count):
vm_name = '{}-vm{}'.format(rfs.storage_cluster.hostname_prefix, i)
try:
@@ -868,25 +872,27 @@
vm_name=vm_name,
)
raise RuntimeError(
'existing virtual machine {} found, cannot add this '
'Existing virtual machine {} found, cannot add this '
'storage cluster'.format(vm.id))
except msrestazure.azure_exceptions.CloudError as e:
if e.status_code == 404:
pass
else:
raise
# check if all referenced managed disks exist
disk_ids = list_disks(compute_client, config, restrict_scope=True)
diskname_map = {}
for disk_id, sat in disk_ids:
diskname_map[disk_id.split('/')[-1]] = (disk_id, sat)
for key in rfs.storage_cluster.vm_disk_map:
for disk in rfs.storage_cluster.vm_disk_map[key].disk_array:
if disk not in diskname_map:
# check if all referenced managed disks exist and premium sku
# is specified if premium disk
for disk in rfs.storage_cluster.vm_disk_map[i].disk_array:
if disk not in disk_map:
raise RuntimeError(
'referenced managed disk {} unavailable in set {}'.format(
disk, diskname_map))
del disk_ids
('Referenced managed disk {} unavailable in set {} for '
'vm offset {}').format(disk, disk_map, i))
if (disk_map[disk][1] ==
computemodels.StorageAccountTypes.premium_lrs and
not rfs.storage_cluster.vm_size.lower().endswith('s')):
raise RuntimeError(
('Premium storage requires a DS, DS_V2, FS, GS or LS '
'series vm_size instead of {}'.format(
rfs.storage_cluster.vm_size)))
# create nsg
nsg_async_op = _create_network_security_group(network_client, rfs)
# create availability set if vm_count > 1
@@ -944,7 +950,7 @@ def create_storage_cluster(
# create vms
for i in range(rfs.storage_cluster.vm_count):
async_ops.append(_create_virtual_machine(
compute_client, rfs, availset, nics, diskname_map, i))
compute_client, rfs, availset, nics, disk_map, i))
logger.debug('waiting for virtual machines to be created')
vm_ext_async_ops = {}
vms = {}
@@ -953,7 +959,7 @@ def create_storage_cluster(
# install vm extension
vm_ext_async_ops[offset] = _create_virtual_machine_extension(
compute_client, rfs, bootstrap_file, blob_urls,
vm.name, diskname_map, offset)
vm.name, disk_map, offset)
# cache vm
vms[offset] = vm
async_ops.clear()
@@ -1242,6 +1248,10 @@ def delete_storage_cluster(
logger.info('resource group {} deleted'.format(
rfs.resource_group))
return
if not util.confirm_action(
config, 'delete storage cluster {}'.format(
rfs.storage_cluster.id)):
return
# get vms and cache for concurrent async ops
resources = {}
for i in range(rfs.storage_cluster.vm_count):
@@ -1258,9 +1268,6 @@ def delete_storage_cluster(
else:
raise
else:
if not util.confirm_action(
config, 'delete virtual machine {}'.format(vm.name)):
continue
# get resources connected to vm
nic, pip, subnet, vnet, nsg, slb = \
_get_resource_names_from_virtual_machine(
@@ -1422,6 +1429,187 @@ def delete_storage_cluster(
data_disk_async_ops.clear()
def expand_storage_cluster(
compute_client, network_client, config, bootstrap_file,
rebalance=False):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient, dict, str,
# bool) -> bool
"""Expand a storage cluster
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
network client
:param dict config: configuration dict
:param str bootstrap_file: bootstrap file
:param bool rebalance: rebalance filesystem
:rtype: bool
:return: if cluster was expanded
"""
# retrieve remotefs settings
rfs = settings.remotefs_settings(config)
if not util.confirm_action(
config, 'expand storage cluster {}'.format(
rfs.storage_cluster.id)):
return False
# check if cluster exists
logger.debug('checking if storage cluster {} exists'.format(
rfs.storage_cluster.id))
# construct disk map
disk_map = {}
disk_ids = list_disks(compute_client, config, restrict_scope=True)
for disk_id, sat in disk_ids:
disk_map[disk_id.split('/')[-1]] = (disk_id, sat)
del disk_ids
# check vms
vms = {}
new_disk_count = 0
for i in range(rfs.storage_cluster.vm_count):
vm_name = '{}-vm{}'.format(rfs.storage_cluster.hostname_prefix, i)
try:
vm = compute_client.virtual_machines.get(
resource_group_name=rfs.resource_group,
vm_name=vm_name,
)
except msrestazure.azure_exceptions.CloudError as e:
if e.status_code == 404:
raise RuntimeError(
'Virtual machine {} not found, cannot expand this '
'storage cluster'.format(vm_name))
else:
raise
# create entry
entry = {
'vm': vm,
'pe_disks': {
'names': set(),
'luns': [],
},
'new_disks': [],
}
# get attached disks
for dd in vm.storage_profile.data_disks:
entry['pe_disks']['names'].add(dd.name)
entry['pe_disks']['luns'].append(dd.lun)
# check if all referenced managed disks exist
for disk in rfs.storage_cluster.vm_disk_map[i].disk_array:
if disk not in disk_map:
raise RuntimeError(
('Referenced managed disk {} unavailable in set {} for '
'vm offset {}. Ensure that this disk has been '
'provisioned first.').format(disk, disk_map, i))
if disk not in entry['pe_disks']['names']:
entry['new_disks'].append(disk)
new_disk_count += 1
# check for proper raid setting and number of disks
pe_len = len(entry['pe_disks']['names'])
if pe_len <= 1 or rfs.storage_cluster.vm_disk_map[i].raid_type != 0:
raise RuntimeError(
'Cannot expand array from {} disk(s) or RAID level {}'.format(
pe_len, rfs.storage_cluster.vm_disk_map[i].raid_type))
# add vm to map
vms[i] = entry
if len(vms) == 0:
logger.warning(
'no virtual machines to expand in storage cluster {}'.format(
rfs.storage_cluster.id))
return False
if settings.verbose(config):
logger.debug('expand settings:{}{}'.format(os.linesep, vms))
if new_disk_count == 0:
logger.error(
'no new disks detected for storage cluster {}'.format(
rfs.storage_cluster.id))
return False
# attach new data disks to each vm
async_ops = []
for key in vms:
entry = vms[key]
vm = entry['vm']
if len(entry['new_disks']) == 0:
logger.debug('no new disks to attach to virtual machine {}'.format(
vm.id))
continue
premium = False
# sort lun array and get last element
lun = sorted(entry['pe_disks']['luns'])[-1] + 1
for diskname in entry['new_disks']:
if (disk_map[diskname][1] ==
computemodels.StorageAccountTypes.premium_lrs):
premium = True
vm.storage_profile.data_disks.append(
computemodels.DataDisk(
lun=lun,
name=diskname,
create_option=computemodels.DiskCreateOption.attach,
managed_disk=computemodels.ManagedDiskParameters(
id=disk_map[diskname][0],
),
)
)
lun += 1
logger.info(
'attaching {} additional data disks to virtual machine {}'.format(
len(entry['new_disks']), vm.id))
# update vm
async_ops.append(
(key, premium, compute_client.virtual_machines.create_or_update(
resource_group_name=rfs.resource_group,
vm_name=vm.name,
parameters=vm)))
# wait for async ops to complete
if len(async_ops) == 0:
logger.error('no operations started for expansion')
return False
logger.debug('waiting for disks to attach to virtual machines')
for offset, premium, op in async_ops:
vm = op.result()
vms[offset]['vm'] = vm
# execute bootstrap script via ssh
script_cmd = \
'/opt/batch-shipyard/{bsf} {a}{b}{f}{m}{p}{r}{s}'.format(
bsf=bootstrap_file,
a=' -a',
b=' -b' if rebalance else '',
f=' -f {}'.format(
rfs.storage_cluster.vm_disk_map[offset].format_as),
m=' -m {}'.format(
rfs.storage_cluster.file_server.mountpoint),
p=' -p' if premium else '',
r=' -r {}'.format(
rfs.storage_cluster.vm_disk_map[offset].raid_type),
s=' -s {}'.format(rfs.storage_cluster.file_server.type),
)
ssh_priv_key, port, username, ip = _get_ssh_info(
compute_client, network_client, config, None, vm.name)
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_priv_key),
'-p', str(port), '{}@{}'.format(username, ip),
'sudo']
cmd.extend(script_cmd.split())
proc = util.subprocess_nowait_pipe_stdout(cmd)
stdout = proc.communicate()[0]
if stdout is not None:
stdout = stdout.decode('utf8')
if util.on_windows():
stdout = stdout.replace('\n', os.linesep)
vms[offset]['status'] = proc.returncode
vms[offset]['stdout'] = '>>stdout>> {}:{}{}'.format(
vm.name, os.linesep, stdout)
logger.info('disk attach operations completed')
for key in vms:
entry = vms[key]
vm = entry['vm']
log = 'bootstrap exit code for virtual machine {}: {}'.format(
vm.name, entry['status'])
if entry['status'] == 0:
logger.info(log)
else:
logger.error(log)
logger.error(entry['stdout'])
return True
def _deallocate_virtual_machine(compute_client, rg_name, vm_name):
# type: (azure.mgmt.compute.ComputeManagementClient, str, str) ->
# msrestazure.azure_operation.AzureOperationPoller
@@ -1639,9 +1827,10 @@ def stat_storage_cluster(
proc = util.subprocess_nowait_pipe_stdout(cmd)
stdout = proc.communicate()[0]
if proc.returncode == 0:
stdout = stdout.decode('utf8')
if util.on_windows():
stdout = stdout.replace('\n', os.linesep)
if stdout is not None:
stdout = stdout.decode('utf8')
if util.on_windows():
stdout = stdout.replace('\n', os.linesep)
fsstatus.append('>> File Server Status for {}:{}{}'.format(
vm.name, os.linesep, stdout))
else:
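
For context, the remote command that expand_storage_cluster assembles and runs over SSH resembles the sketch below; the key path, port, username, and address are illustrative placeholders (the real values come from _get_ssh_info), and the flag values are drawn from the remotefs settings for the given VM offset:

# illustrative invocation; -b is present only when rebalance=True and
# -p is appended only when a premium_lrs disk is attached to the VM
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    -i id_rsa_shipyard -p 22 shipyarduser@10.0.0.4 \
    sudo /opt/batch-shipyard/shipyard_remotefs_bootstrap.sh \
    -a -b -f ext4 -m /data -r 0 -s nfs

The -a flag puts the bootstrap script into attach mode, which skips the first-boot provisioning steps and only handles the newly attached disks.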

View file

@@ -2309,7 +2309,7 @@ def remotefs_settings(config):
if raid_type == 0 and len(disk_array) < 2:
raise ValueError('RAID-0 arrays require at least two disks')
if raid_type != 0:
raise ValueError('Unsupported raid type {}'.format(raid_type))
raise ValueError('Unsupported RAID level {}'.format(raid_type))
format_as = conf[vmkey]['format_as']
if format_as != 'btrfs' and not format_as.startswith('ext'):
raise ValueError('Unsupported format as type {}'.format(format_as))
@@ -2318,6 +2318,12 @@ def remotefs_settings(config):
format_as=conf[vmkey]['format_as'],
raid_type=raid_type,
)
# check disk map against vm_count
if len(disk_map) != sc_vm_count:
raise ValueError(
('Number of entries in storage_cluster:vm_disk_map {} '
'inconsistent with storage_cluster:vm_count {}').format(
len(disk_map), sc_vm_count))
return RemoteFsSettings(
resource_group=resource_group,
location=location,

View file

@@ -5,13 +5,9 @@ set -o pipefail
DEBIAN_FRONTEND=noninteractive
# always copy scripts to well known location
mkdir -p /opt/batch-shipyard
cp shipyard_remotefs_*.sh /opt/batch-shipyard
# vars
rebalance_btrfs=0
numdisks_verify=
attach_disks=0
rebalance=0
format_as=
server_type=
mountpath=
@@ -20,13 +16,13 @@ premium_storage=0
raid_type=-1
# begin processing
while getopts "h?bd:f:m:npr:s:" opt; do
while getopts "h?abf:m:npr:s:" opt; do
case "$opt" in
h|\?)
echo "shipyard_remotefs_bootstrap.sh parameters"
echo ""
echo "-b rebalance btrfs data on resize"
echo "-d [num] num disks"
echo "-a attach mode"
echo "-b rebalance filesystem on resize"
echo "-f [format as] format as"
echo "-m [mountpoint] mountpoint"
echo "-n Tune TCP parameters"
@@ -36,11 +32,11 @@ while getopts "h?bd:f:m:npr:s:" opt; do
echo ""
exit 1
;;
b)
rebalance_btrfs=1
a)
attach_disks=1
;;
d)
numdisks_verify=$OPTARG
b)
rebalance=1
;;
f)
format_as=${OPTARG,,}
@@ -66,8 +62,8 @@ shift $((OPTIND-1))
[ "$1" = "--" ] && shift
echo "Parameters:"
echo " Rebalance btrfs: $rebalance_btrfs"
echo " Num data disks: $numdisks_verify"
echo " Attach mode: $attach_disks"
echo " Rebalance filesystem: $rebalance"
echo " Format as: $format_as"
echo " Mountpath: $mountpath"
echo " Tune TCP parameters: $optimize_tcp"
@@ -75,10 +71,15 @@ echo " Premium storage: $premium_storage"
echo " RAID type: $raid_type"
echo " Server type: $server_type"
# optimize network TCP settings
if [ $optimize_tcp -eq 1 ]; then
sysctlfile=/etc/sysctl.d/60-azure-batch-shipyard-remotefs.conf
if [ ! -e $sysctlfile ] || [ ! -s $sysctlfile ]; then
# first start prep
if [ $attach_disks -eq 0 ]; then
# always copy scripts to well known location
mkdir -p /opt/batch-shipyard
cp shipyard_remotefs_*.sh /opt/batch-shipyard
# optimize network TCP settings
if [ $optimize_tcp -eq 1 ]; then
sysctlfile=/etc/sysctl.d/60-azure-batch-shipyard-remotefs.conf
if [ ! -e $sysctlfile ] || [ ! -s $sysctlfile ]; then
cat > $sysctlfile << EOF
net.core.rmem_default=16777216
net.core.wmem_default=16777216
@@ -94,20 +95,32 @@ net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_abort_on_overflow=1
net.ipv4.route.flush=1
EOF
fi
# reload settings
service procps reload
fi
# install required server_type software
apt-get update
if [ $server_type == "nfs" ]; then
apt-get install -y --no-install-recommends nfs-kernel-server
# patch buggy nfs-mountd.service unit file
# https://bugs.launchpad.net/ubuntu/+source/nfs-utils/+bug/1590799
set +e
grep "^After=network.target local-fs.target" /lib/systemd/system/nfs-mountd.service
if [ $? -eq 0 ]; then
set -e
sed -i -e "s/^After=network.target local-fs.target/After=rpcbind.target/g" /lib/systemd/system/nfs-mountd.service
fi
set -e
# reload unit files
systemctl daemon-reload
# enable and restart nfs server
systemctl enable nfs-kernel-server.service
systemctl restart nfs-kernel-server.service
else
echo "server_type $server_type not supported."
exit 1
fi
# reload settings
service procps reload
fi
# install required server_type software
apt-get update
if [ $server_type == "nfs" ]; then
apt-get install -y --no-install-recommends nfs-kernel-server
systemctl enable nfs-kernel-server.service
systemctl start nfs-kernel-server.service
else
echo "server_type $server_type not supported."
exit 1
fi
# get all data disks
@@ -120,22 +133,15 @@ for disk in "${all_disks[@]}"; do
fi
done
unset all_disks
# validate number of data disks found
numdisks=${#data_disks[@]}
echo "found $numdisks data disks: ${data_disks[@]}"
if [ $numdisks -ne $numdisks_verify ]; then
echo "anticipated data disk count of $numdisks_verify does not match $numdisks disks found!"
exit 1
fi
unset numdisks_verify
# check if data disks are already partitioned
declare -a skipped_part
for disk in "${data_disks[@]}"; do
part1=$(partprobe -d -s $disk | cut -d' ' -f4)
if [ -z $part1 ]; then
echo "$disk: partition 1 not found. Partitioning $disk..."
echo "$disk: partition 1 not found. Partitioning $disk."
echo -e "n\np\n1\n\n\nw" | fdisk $disk
else
echo "$disk: partition 1 found. Skipping partitioning."
@@ -184,6 +190,7 @@ if [ $raid_type -ge 0 ]; then
done
set -e
no_raid_count=${#raid_array[@]}
# take action depending on the no-RAID disk count
if [ $no_raid_count -eq 0 ]; then
echo "No disks require RAID setup"
elif [ $no_raid_count -eq $numdisks ]; then
@@ -195,7 +202,25 @@ if [ $raid_type -ge 0 ]; then
mkfs.btrfs -m raid${raid_type} ${raid_array[@]}
fi
else
mdadm --create --verbose $target --level=$raid_type --raid-devices=$numdisks ${raid_array[@]}
set +e
# first check if this is a pre-existing array
mdadm --detail --scan
if [ $? -eq 0 ]; then
target=($(find /dev/md* -maxdepth 0 -type b))
if [ ${#target[@]} -ne 1 ]; then
echo "Could not determine pre-existing md target"
exit 1
fi
target=${target[0]}
echo "Existing array found: $target"
# refresh target uuid to md target
read target_uuid < <(blkid ${target} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
else
set -e
mdadm --create --verbose $target --level=$raid_type --raid-devices=$numdisks ${raid_array[@]}
format_target=1
fi
set -e
fi
else
echo "Mismatch of non-RAID disks $no_raid_count to total disks $numdisks."
@@ -208,11 +233,11 @@ if [ $raid_type -ge 0 ]; then
# add new block devices first
btrfs device add ${raid_array[@]} $mountpath
# resize btrfs volume
echo "Resizing filesystem at $mountpath..."
echo "Resizing filesystem at $mountpath."
btrfs filesystem resize max $mountpath
# rebalance data and metadata across all devices
if [ $rebalance_btrfs -eq 1]; then
echo "Rebalancing btrfs on $mountpath..."
if [ $rebalance -eq 1 ]; then
echo "Rebalancing btrfs on $mountpath."
btrfs filesystem balance $mountpath
echo "Rebalance of btrfs on $mountpath complete."
fi
@@ -225,14 +250,17 @@ if [ $raid_type -ge 0 ]; then
raid_resized=1
fi
fi
# dump diagnostic info
if [ $format_as == "btrfs" ]; then
read target_uuid < <(blkid ${all_raid_disks[0]} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
btrfs filesystem show
else
read target_uuid < <(blkid ${target} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
cat /proc/mdstat
mdadm --detail $target
fi
# get uuid of first disk as target uuid if not populated
if [ -z $target_uuid ]; then
read target_uuid < <(blkid ${all_raid_disks[0]} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
fi
fi
# create filesystem on target device
@@ -241,71 +269,77 @@ if [ $format_target -eq 1 ]; then
echo "Target not specified for format"
exit 1
fi
echo "Creating filesystem on $target..."
echo "Creating filesystem on $target."
if [ $format_as == "btrfs" ]; then
mkfs.btrfs $target
elif [ $format_as == ext* ]; then
elif [[ $format_as == ext* ]]; then
mkfs.${format_as} -m 0 $target
else
echo "Unknown format as: $format_as"
exit 1
fi
# refresh target uuid
read target_uuid < <(blkid ${target} | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
fi
# check if filesystem is mounted (active array)
mounted=0
set +e
mountpoint -q $mountpath
if [ $? -eq 0 ]; then
mounted=1
fi
set -e
# add fstab entry and mount
if [ $mounted -eq 0 ]; then
if [ -z $target_uuid ]; then
echo "Target UUID not populated!"
exit 1
fi
# check if fstab entry exists
add_fstab=0
# mount filesystem
if [ $attach_disks -eq 0 ]; then
# check if filesystem is mounted (active array)
mounted=0
set +e
grep "^UUID=${target_uuid}" /etc/fstab
if [ $? -ne 0 ]; then
add_fstab=1
mountpoint -q $mountpath
if [ $? -eq 0 ]; then
mounted=1
fi
set -e
# add fstab entry
if [ $add_fstab -eq 1 ]; then
echo "Adding $target_uuid to mountpoint $mountpath to /etc/fstab"
if [ $premium_storage -eq 1 ]; then
# disable barriers due to RO cache
if [ $format_as == "btrfs" ]; then
mo=",nobarrier"
else
mo=",barrier=0"
fi
else
# enable discard to save cost on standard storage
mo=",discard"
# add fstab entry and mount
if [ $mounted -eq 0 ]; then
if [ -z $target_uuid ]; then
echo "Target UUID not populated!"
exit 1
fi
echo "UUID=$target_uuid $mountpath $format_as defaults,noatime${mo} 0 2" >> /etc/fstab
# check if fstab entry exists
add_fstab=0
set +e
grep "^UUID=${target_uuid}" /etc/fstab
if [ $? -ne 0 ]; then
add_fstab=1
fi
set -e
# add fstab entry
if [ $add_fstab -eq 1 ]; then
echo "Adding $target_uuid to mountpoint $mountpath to /etc/fstab"
if [ $premium_storage -eq 1 ]; then
# disable barriers due to RO cache
if [ $format_as == "btrfs" ]; then
mo=",nobarrier"
else
mo=",barrier=0"
fi
else
# enable discard to save cost on standard storage
mo=",discard"
fi
echo "UUID=$target_uuid $mountpath $format_as defaults,noatime${mo} 0 2" >> /etc/fstab
fi
# create mountpath
mkdir -p $mountpath
# mount
mount $mountpath
# ensure proper permissions
chmod 1777 $mountpath
fi
# create mountpath
mkdir -p $mountpath
# mount
mount $mountpath
# ensure proper permissions
chmod 1777 $mountpath
# log mount
mount | grep $mountpath
fi
# grow underlying filesystem if required
if [ $raid_resized -eq 1 ]; then
echo "Resizing filesystem at $mountpath..."
echo "Resizing filesystem at $mountpath."
if [ $format_as == "btrfs" ]; then
btrfs filesystem resize max $mountpath
elif [ $format_as == ext* ]; then
elif [[ $format_as == ext* ]]; then
resize2fs $mountpath
else
echo "Unknown format as: $format_as"
@@ -313,33 +347,32 @@ if [ $raid_resized -eq 1 ]; then
fi
fi
# log mount
mount | grep $mountpath
# set up server_type software
if [ $server_type == "nfs" ]; then
# edit /etc/exports
add_exports=0
set +e
grep "^${mountpath}" /etc/exports
if [ $? -ne 0 ]; then
add_exports=1
fi
if [ $add_exports -eq 1 ]; then
# note that the * address/hostname allow is ok since we block nfs
# inbound traffic at the network security group except for allowed
# ip addresses as specified in the remotefs.json file
echo "${mountpath} *(rw,sync,root_squash,no_subtree_check,mountpoint=${mountpath})" >> /etc/exports
systemctl reload nfs-kernel-server.service
fi
systemctl status nfs-kernel-server.service
if [ $? -ne 0 ]; then
if [ $attach_disks -eq 0 ]; then
if [ $server_type == "nfs" ]; then
# edit /etc/exports
add_exports=0
set +e
grep "^${mountpath}" /etc/exports
if [ $? -ne 0 ]; then
add_exports=1
fi
if [ $add_exports -eq 1 ]; then
# note that the * address/hostname allow is ok since we block nfs
# inbound traffic at the network security group except for allowed
# ip addresses as specified in the remotefs.json file
echo "${mountpath} *(rw,sync,root_squash,no_subtree_check,mountpoint=${mountpath})" >> /etc/exports
systemctl reload nfs-kernel-server.service
fi
systemctl status nfs-kernel-server.service
if [ $? -ne 0 ]; then
set -e
# attempt to start
systemctl restart nfs-kernel-server.service
fi
set -e
# attempt to start
systemctl restart nfs-kernel-server.service
else
echo "server_type $server_type not supported."
exit 1
fi
set -e
else
echo "server_type $server_type not supported."
exit 1
fi
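
The blkid pipeline used throughout the bootstrap script is terse, so here is a minimal standalone sketch of what it extracts; the device name and UUID are made up, and it assumes UUID is the first key=value pair blkid prints for the device:

# blkid emits a line like: /dev/md0: UUID="f9e8d7c6-b5a4-4321-9876-0123456789ab" TYPE="ext4"
# splitting on '=' or space makes field $3 the quoted UUID; sed strips the quotes
read target_uuid < <(blkid /dev/md0 | awk -F "[= ]" '{print $3}' | sed 's/\"//g')
echo "$target_uuid"
# f9e8d7c6-b5a4-4321-9876-0123456789ab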

View file

@@ -92,7 +92,14 @@ if [ $raid_type -ge 0 ]; then
echo "/proc/mdstat:"
cat /proc/mdstat
echo ""
# find md target
target=($(find /dev/md* -maxdepth 0 -type b))
if [ ${#target[@]} -ne 1 ]; then
echo "Could not determine md target"
exit 1
fi
target=${target[0]}
echo "mdadm detail:"
mdadm --detail /dev/md0
mdadm --detail $target
fi
fi
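
A note on the glob-based discovery above: find /dev/md* -maxdepth 0 -type b returns every md block device node, and the script bails unless exactly one exists. A quick interactive check on a node looks like this (output illustrative):

# list md block devices, then inspect the single expected target
find /dev/md* -maxdepth 0 -type b
# /dev/md0
mdadm --detail /dev/md0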

View file

@@ -53,7 +53,7 @@ class CliContext(object):
"""CliContext class: holds context for CLI commands"""
def __init__(self):
"""Ctor for CliContext"""
self.output_config = False
self.show_config = False
self.verbose = False
self.yes = False
self.config = None
@@ -275,7 +275,7 @@ class CliContext(object):
# set internal config kv pairs
self.config['_verbose'] = self.verbose
self.config['_auto_confirm'] = self.yes
if self.output_config:
if self.show_config:
logger.debug('config:\n' + json.dumps(self.config, indent=4))
# free mem
del self.json_credentials
@@ -313,16 +313,16 @@ def _confirm_option(f):
callback=callback)(f)
def _output_config_option(f):
def _show_config_option(f):
def callback(ctx, param, value):
clictx = ctx.ensure_object(CliContext)
clictx.output_config = value
clictx.show_config = value
return value
return click.option(
'--output-config',
'--show-config',
expose_value=False,
is_flag=True,
help='Output configuration',
help='Show configuration',
callback=callback)(f)
@@ -555,7 +555,7 @@ def common_options(f):
f = _credentials_option(f)
f = _configdir_option(f)
f = _verbose_option(f)
f = _output_config_option(f)
f = _show_config_option(f)
f = _confirm_option(f)
return f
@@ -647,6 +647,19 @@ def remotefs_del(
delete_virtual_network, wait)
@cluster.command('expand')
@click.option(
'--rebalance', is_flag=True, help='Rebalance filesystem, if applicable')
@common_options
@remotefs_options
@pass_cli_context
def remotefs_expand(ctx, rebalance):
"""Expand a storage cluster used for a Remote Filesystem in Azure"""
ctx.initialize_for_remotefs()
convoy.fleet.action_remotefs_cluster_expand(
ctx.compute_client, ctx.network_client, ctx.config, rebalance)
@cluster.command('suspend')
@click.option(
'--wait', is_flag=True, help='Wait for suspension to complete')
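
Putting the CLI wiring together, a hypothetical invocation of the new expand command might look like the following; the shipyard.py entry point name and command path are assumptions inferred from the group/command names shown, while --rebalance and --show-config are the flags defined above:

# expand the storage cluster described by the loaded configuration and
# rebalance the filesystem afterwards; --show-config dumps the merged
# configuration before the action runs
python shipyard.py remotefs cluster expand --rebalance --show-config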