Modify glusterfs race fix with iptables

- Restrict smb account password from containing certain characters due
  to echo reinterpretation issues
- Fix some more ssh/pathlib issues
This commit is contained in:
Fred Park 2017-04-14 14:56:35 -07:00
Parent 469e5cb56f
Commit 7b99cf0b85
4 changed files with 42 additions and 31 deletions

View file

@@ -913,14 +913,14 @@ def ingress_data(
'valid SSH user')
# try to get valid ssh private key (from various config blocks)
ssh_private_key = dest.data_transfer.ssh_private_key
if ssh_private_key is None or not ssh_private_key.exists():
if ssh_private_key is None:
ssh_private_key = pool.ssh.ssh_private_key
if ssh_private_key is None or not ssh_private_key.exists():
if ssh_private_key is None:
ssh_private_key = pathlib.Path(crypto.get_ssh_key_prefix())
if ssh_private_key is None or not ssh_private_key.exists():
raise RuntimeError(
'ssh private key is invalid or does not exist: {}'.format(
ssh_private_key))
if not ssh_private_key.exists():
raise RuntimeError(
'specified SSH private key is invalid or does not '
'exist')
logger.debug('using ssh_private_key from: {}'.format(
ssh_private_key))
if (dest.data_transfer.method == 'scp' or

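The hunk above changes how ingress_data resolves the SSH private key: fall through the destination transfer settings, then the pool SSH settings, then the generated key prefix, and check for existence only once on the final candidate. A minimal standalone sketch of that fallback-then-validate pattern, using pathlib; the parameter names and the default prefix are placeholders, not the real config fields:

import pathlib

def resolve_ssh_private_key(transfer_key=None, pool_key=None,
                            generated_key_prefix='id_rsa_example'):
    # fall through candidate locations in priority order
    ssh_private_key = transfer_key
    if ssh_private_key is None:
        ssh_private_key = pool_key
    if ssh_private_key is None:
        ssh_private_key = pathlib.Path(generated_key_prefix)
    # validate existence only on the final candidate
    if not ssh_private_key.exists():
        raise RuntimeError(
            'specified SSH private key is invalid or does not exist')
    return ssh_private_key

With this change, a key that is configured but missing raises an error instead of silently falling through to the next candidate.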
View file

@@ -843,12 +843,11 @@ def create_storage_cluster(
key_data = rfs.storage_cluster.ssh.ssh_public_key_data
else:
# create universal ssh key for all vms if not specified
if util.is_none_or_empty(rfs.storage_cluster.ssh.ssh_public_key):
ssh_pub_key = rfs.storage_cluster.ssh.ssh_public_key
if ssh_pub_key is None:
_, ssh_pub_key = crypto.generate_ssh_keypair(
rfs.storage_cluster.ssh.generated_file_export_path,
crypto.get_remotefs_ssh_key_prefix())
else:
ssh_pub_key = rfs.storage_cluster.ssh.ssh_public_key
# read public key data
with ssh_pub_key.open('rb') as fd:
key_data = fd.read().decode('utf8')
@@ -1094,7 +1093,8 @@ def resize_storage_cluster(
key_data = rfs.storage_cluster.ssh.ssh_public_key_data
else:
# create universal ssh key for all vms if not specified
if util.is_none_or_empty(rfs.storage_cluster.ssh.ssh_public_key):
ssh_pub_key = rfs.storage_cluster.ssh.ssh_public_key
if ssh_pub_key is None:
# check if ssh key exists first in default location
ssh_pub_key = pathlib.Path(
rfs.storage_cluster.ssh.generated_file_export_path,
@@ -1103,10 +1103,9 @@ def resize_storage_cluster(
_, ssh_pub_key = crypto.generate_ssh_keypair(
rfs.storage_cluster.ssh.generated_file_export_path,
crypto.get_remotefs_ssh_key_prefix())
else:
ssh_pub_key = rfs.storage_cluster.ssh.ssh_public_key
with ssh_pub_key.open('rb') as fd:
key_data = fd.read().decode('utf8')
# read public key data
with ssh_pub_key.open('rb') as fd:
key_data = fd.read().decode('utf8')
ssh_pub_key = computemodels.SshPublicKey(
path='/home/{}/.ssh/authorized_keys'.format(
rfs.storage_cluster.ssh.username),

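The hunks above make create_storage_cluster and resize_storage_cluster resolve the cluster SSH public key the same way: use explicit key data if present, otherwise a configured public key file, otherwise generate (or, on resize, reuse) a keypair under the export path, and read the key data once at the end. A small sketch of the resize-side pattern, assuming ssh-keygen is on PATH; the file name is a placeholder rather than the prefix the real crypto helpers use:

import pathlib
import subprocess

def ensure_ssh_public_key(export_path, key_name='id_rsa_example'):
    # check if a previously generated public key exists first in the
    # default export location
    pub_key = pathlib.Path(export_path, key_name + '.pub')
    if not pub_key.exists():
        # generate a new keypair into the export path
        priv_key = pathlib.Path(export_path, key_name)
        subprocess.check_call(
            ['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-f', str(priv_key)])
    # read public key data
    with pub_key.open('rb') as fd:
        return fd.read().decode('utf8')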
View file

@@ -2487,6 +2487,10 @@ def fileserver_settings(config, vm_count):
raise ValueError(
'samba account password is invalid for username {}'.format(
smb_account.username))
if ('$' in smb_account.password or '!' in smb_account.password or
'\n' in smb_account.password):
raise ValueError(
'samba account password cannot contain $, ! or \\n characters')
if smb_account.uid is None or smb_account.gid is None:
raise ValueError(
('samba account uid and/or gid is invalid for '

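The added check rejects $, ! and newline in the samba account password because, per the commit message, the password is later passed through echo in a shell, where $ can trigger variable expansion, ! can trigger history expansion, and a newline cuts the echoed value short. A minimal standalone sketch of the same validation:

def validate_smb_password(username, password):
    # characters a shell echo can reinterpret: $ (variable expansion),
    # ! (history expansion) and newline (premature termination)
    if any(c in password for c in ('$', '!', '\n')):
        raise ValueError(
            'samba account password cannot contain $, ! or \\n '
            'characters for username {}'.format(username))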
View file

@@ -99,7 +99,7 @@ gluster_poll_for_connections() {
done
set -e
echo "$numpeers host(s) joined peering"
# delay to wait for after peer connections
# delay wait after peer connections
sleep 5
}
@@ -130,16 +130,9 @@ gluster_poll_for_volume() {
}
enable_and_start_glusterfs() {
systemctl enable glusterfs-server
# start service if not started
set +e
systemctl status glusterfs-server
if [ $? -ne 0 ]; then
set -e
systemctl start glusterfs-server
fi
set -e
flush_glusterfs_firewall_rules() {
iptables -F INPUT
iptables -L INPUT
}
setup_glusterfs() {
@@ -431,15 +424,30 @@ EOF
if [ $? -ne 0 ]; then
set -e
systemctl start nfs-kernel-server.service
systemctl status nfs-kernel-server.service
fi
set -e
elif [ $server_type == "glusterfs" ]; then
apt-get install -y -q --no-install-recommends glusterfs-server
# reload unit files
systemctl daemon-reload
# ensure glusterfs server is stopped. we should not start it yet
# to prevent a race where the master (aka prober) script execution
# runs well before the child, we should block all gluster connection
# requests with iptables. we should not remove the filter rules
# until all local disk setup has been completed.
systemctl stop glusterfs-server
iptables -A INPUT -p tcp --destination-port 24007:24008 -j REJECT
iptables -A INPUT -p tcp --destination-port 49152:49215 -j REJECT
# install glusterfs server
apt-get install -y -q --no-install-recommends glusterfs-server
# enable gluster service
systemctl enable glusterfs-server
# start service if not started
set +e
systemctl status glusterfs-server
if [ $? -ne 0 ]; then
set -e
systemctl start glusterfs-server
systemctl status glusterfs-server
fi
set -e
iptables -L INPUT
else
echo "server_type $server_type not supported."
exit 1
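The comment block above describes the race fix from the commit title: gluster management ports (24007-24008) and brick ports (49152-49215) are blocked with iptables REJECT rules so a faster peer cannot probe this node before local disk setup completes, and flush_glusterfs_firewall_rules clears the rules afterwards (called before setup_glusterfs in the dispatcher below). A rough Python sketch of that block/flush pair, assuming root privileges and an available iptables binary; it mirrors the script's commands rather than replacing them:

import subprocess

GLUSTER_PORT_RANGES = ('24007:24008', '49152:49215')

def block_gluster_ports():
    # reject incoming gluster management and brick connections so a
    # remote peer cannot connect before local setup has finished
    for ports in GLUSTER_PORT_RANGES:
        subprocess.check_call([
            'iptables', '-A', 'INPUT', '-p', 'tcp',
            '--destination-port', ports, '-j', 'REJECT'])

def flush_gluster_firewall_rules():
    # flush all INPUT rules once local disk setup has completed
    subprocess.check_call(['iptables', '-F', 'INPUT'])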
@@ -726,7 +734,7 @@ if [ $attach_disks -eq 0 ]; then
if [ $server_type == "nfs" ]; then
setup_nfs
elif [ $server_type == "glusterfs" ]; then
enable_and_start_glusterfs
flush_glusterfs_firewall_rules
setup_glusterfs
else
echo "server_type $server_type not supported."