Allow scp dm and credential encryption on Windows

- Rename old glusterfs scripts to be less confusing with remote
  glusterfs support
This commit is contained in:
Fred Park 2017-03-11 09:21:33 -08:00
Parent 675c6c37f8
Commit 0ed28d96fc
10 changed files: 69 additions and 58 deletions

View file

@ -38,6 +38,11 @@ Canonical/UbuntuServer/16.04-LTS instead
### Fixed
- Logging time format and incorrect Zulu time designation.
- `scp` and `multinode_scp` data movement capability is now supported in
Windows given `ssh.exe` and `scp.exe` can be found in `%PATH%` or the current
working directory. `rsync` methods are not supported on Windows.
- Credential encryption is now supported in Windows given `openssl.exe` can
be found in `%PATH%` or the current working directory.
## [2.5.4] - 2017-03-08
### Changed

View file

@ -488,7 +488,7 @@ def generate_ssh_tunnel_script(batch_client, pool, ssh_priv_key, nodes):
pool.ssh.generated_file_export_path, crypto._SSH_KEY_PREFIX)
ssh_args = [
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_priv_key), '-p', '$port', '-N',
'-L', '2375:localhost:2375', '-L', '3476:localhost:3476',
'{}@$ip'.format(pool.ssh.username)
@ -520,7 +520,7 @@ def generate_ssh_tunnel_script(batch_client, pool, ssh_priv_key, nodes):
'echo tunneling to docker daemon on $node at '
'$ip:$port\n')
fd.write(' '.join(ssh_args))
fd.write(' >/dev/null 2>&1 &\n')
fd.write(' >{} 2>&1 &\n'.format(os.devnull))
fd.write('pid=$!\n')
fd.write('echo ssh tunnel pid is $pid\n')
fd.write(
@ -1029,10 +1029,10 @@ def _send_docker_kill_signal(
rls = batch_client.compute_node.get_remote_login_settings(
target[0], target[1])
ssh_args = [
'ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_private_key),
'-p', str(rls.remote_login_port), '-t',
'{}@{}'.format(username, rls.remote_login_ip_address),
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_private_key), '-p', str(rls.remote_login_port),
'-t', '{}@{}'.format(username, rls.remote_login_ip_address),
('sudo /bin/bash -c "docker kill {tn}; '
'docker ps -qa -f name={tn} | '
'xargs --no-run-if-empty docker rm -v"').format(tn=task_name)

View file

@ -325,9 +325,8 @@ def _singlenode_transfer(dest, src, dst, username, ssh_private_key, rls):
logger.debug('creating remote directory: {}'.format(dst))
dirs = ['mkdir -p {}'.format(dst)]
mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'-i {} -p {} {}@{} {}'.format(
ssh_private_key, port, username, ip,
'-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username, ip,
util.wrap_commands_in_shell(dirs)))
rc = util.subprocess_with_output(
mkdircmd, shell=True, suppress_output=True)
@ -348,17 +347,16 @@ def _singlenode_transfer(dest, src, dst, username, ssh_private_key, rls):
# transfer data
if dest.data_transfer.method == 'scp':
cmd = ('scp -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null -p '
'{} {} -i {} -P {} {} {}@{}:"{}"'.format(
dest.data_transfer.scp_ssh_extra_options, recursive,
ssh_private_key.resolve(), port, cmdsrc,
'-o UserKnownHostsFile={} -p {} {} -i {} '
'-P {} {} {}@{}:"{}"'.format(
os.devnull, dest.data_transfer.scp_ssh_extra_options,
recursive, ssh_private_key.resolve(), port, cmdsrc,
username, ip, shellquote(dst)))
elif dest.data_transfer.method == 'rsync+ssh':
cmd = ('rsync {} {} -e "ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'{} -i {} -p {}" {} {}@{}:"{}"'.format(
'-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
dest.data_transfer.rsync_extra_options, recursive,
dest.data_transfer.scp_ssh_extra_options,
os.devnull, dest.data_transfer.scp_ssh_extra_options,
ssh_private_key.resolve(), port,
cmdsrc, username, ip, shellquote(dst)))
else:
@ -502,9 +500,8 @@ def _multinode_transfer(
port = _rls.remote_login_port
del _rls
mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'-i {} -p {} {}@{} {}'.format(
ssh_private_key, port, username, ip,
'-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username, ip,
util.wrap_commands_in_shell(dirs)))
rc = util.subprocess_with_output(
mkdircmd, shell=True, suppress_output=True)
@ -579,23 +576,22 @@ def _spawn_next_transfer(
if method == 'multinode_scp':
if begin is None and end is None:
cmd = ('scp -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null -p '
'{} -i {} -P {} {} {}@{}:"{}"'.format(
eo, ssh_private_key, port, shellquote(src),
'-o UserKnownHostsFile={} -p {} -i {} '
'-P {} {} {}@{}:"{}"'.format(
os.devnull, eo, ssh_private_key, port, shellquote(src),
username, ip, shellquote(dst)))
else:
cmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'{} -i {} -p {} {}@{} \'cat > "{}"\''.format(
eo, ssh_private_key, port,
'-o UserKnownHostsFile={} {} -i {} '
'-p {} {}@{} \'cat > "{}"\''.format(
os.devnull, eo, ssh_private_key, port,
username, ip, shellquote(dst)))
elif method == 'multinode_rsync+ssh':
if begin is not None or end is not None:
raise RuntimeError('cannot rsync with file offsets')
cmd = ('rsync {} -e "ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'{} -i {} -p {}" {} {}@{}:"{}"'.format(
reo, eo, ssh_private_key, port, shellquote(src),
'-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
reo, os.devnull, eo, ssh_private_key, port, shellquote(src),
username, ip, shellquote(dst)))
else:
raise ValueError('Unknown transfer method: {}'.format(method))
@ -680,10 +676,10 @@ def _multinode_thread_worker(
'rm -f {}.*'.format(dstpath)
]
joincmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'-i {} -p {} {}@{} {}'.format(
ssh_private_key, port, username, ip,
util.wrap_commands_in_shell(cmds)))
'-o UserKnownHostsFile={} -i {} '
'-p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username,
ip, util.wrap_commands_in_shell(cmds)))
procs.append(
util.subprocess_nowait(joincmd, shell=True))
else:

View file

@ -31,6 +31,7 @@ from builtins import ( # noqa
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import logging
import os
try:
import pathlib2 as pathlib
except ImportError:
@ -103,12 +104,13 @@ _NODEPREP_FILE = (
str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_nodeprep.sh'))
)
_GLUSTERPREP_FILE = (
'shipyard_glusterfs.sh',
str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_glusterfs.sh'))
'shipyard_glusterfs_on_compute.sh',
str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_glusterfs_on_compute.sh'))
)
_GLUSTERRESIZE_FILE = (
'shipyard_glusterfs_resize.sh',
str(pathlib.Path(_ROOT_PATH, 'scripts/shipyard_glusterfs_resize.sh'))
'shipyard_glusterfs_on_compute_resize.sh',
str(pathlib.Path(
_ROOT_PATH, 'scripts/shipyard_glusterfs_on_compute_resize.sh'))
)
_HPNSSH_FILE = (
'shipyard_hpnssh.sh',
@ -1770,8 +1772,9 @@ def action_pool_ssh(batch_client, config, cardinal, nodeid):
ip, port, ssh_priv_key))
util.subprocess_with_output(
['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_priv_key), '-p',
str(port), '{}@{}'.format(pool.ssh.username, ip)])
'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_priv_key), '-p', str(port),
'{}@{}'.format(pool.ssh.username, ip)])
def action_pool_delnode(batch_client, config, nodeid):

View file

@ -1361,10 +1361,10 @@ def expand_storage_cluster(
)
ssh_priv_key, port, username, ip = _get_ssh_info(
compute_client, network_client, config, None, vm.name)
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_priv_key),
'-p', str(port), '{}@{}'.format(username, ip),
'sudo']
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_priv_key), '-p', str(port),
'{}@{}'.format(username, ip), 'sudo']
cmd.extend(script_cmd.split())
proc = util.subprocess_nowait_pipe_stdout(cmd)
stdout = proc.communicate()[0]
@ -1611,10 +1611,10 @@ def stat_storage_cluster(
rfs.storage_cluster.vm_disk_map[offset].raid_level),
s=' -s {}'.format(rfs.storage_cluster.file_server.type),
)
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_priv_key),
'-p', str(port), '{}@{}'.format(username, ip),
'sudo']
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_priv_key), '-p', str(port),
'{}@{}'.format(username, ip), 'sudo']
cmd.extend(script_cmd.split())
proc = util.subprocess_nowait_pipe_stdout(cmd)
stdout = proc.communicate()[0]
@ -1729,6 +1729,7 @@ def ssh_storage_cluster(
logger.info('connecting to virtual machine {}:{} with key {}'.format(
ip, port, ssh_priv_key))
util.subprocess_with_output(
['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'UserKnownHostsFile=/dev/null', '-i', str(ssh_priv_key), '-p',
str(port), '{}@{}'.format(username, ip)])
['ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_priv_key), '-p', str(port),
'{}@{}'.format(username, ip)])

View file

@ -237,28 +237,25 @@ necessary software as well. With a manual installation, the following
programs must be installed to take advantage of data movement features of
Batch Shipyard:
1. An SSH client that provides `scp`. OpenSSH with
1. An SSH client that provides `ssh` and `scp` (or `ssh.exe` and `scp.exe`
on Windows). OpenSSH with
[HPN patches](https://www.psc.edu/index.php/using-joomla/extensions/templates/atomic/636-hpn-ssh)
can be used on the client side to further accelerate `scp` to Azure Batch
compute nodes where `hpn_server_swap` has been set to `true` in the
`pool_specification`.
2. `rsync` if `rsync` functionality is needed.
2. `rsync` if `rsync` functionality is needed. This is not supported on
Windows.
3. [blobxfer](https://github.com/Azure/blobxfer) if transfering to Azure
storage. This is automatically installed if `pip install` is used with
`requirements.txt` as per above. If installed with `--user` flag, this is
typically placed in `~/.local/bin`. This path will need to be added to your
`PATH` environment variable.
Note that data movement which involves programs required from 1 or 2 above
is not supported if invoked from Windows.
### Encryption Support
Batch Shipyard supports encrypting credentials that are used by backend
components within your pool deployment. In order to utilize this feature,
you must have `openssl` installed. The `install.sh` script ensures that
OpenSSL is installed. The Docker CLI image also contains OpenSSL. Encryption
support is disabled on Windows; if you need this feature, please use
Linux or Mac.
OpenSSL is installed. The Docker CLI image also contains OpenSSL.
Note that all commandlines, environment variables and resource file URLs
which are stored by the Azure Batch Service are encrypted by the service.

View file

@ -141,7 +141,8 @@ created with pool creation.
the SSH user is not created.
* (optional) `generate_docker_tunnel_script` property directs script to
generate an SSH tunnel script that can be used to connect to the remote
Docker engine running on a compute node.
Docker engine running on a compute node. This script can only be used on
non-Windows systems.
* (optional) `generated_file_export_path` is the path to export the
generated RSA keypair and docker tunnel script to. If omitted, the
current directory is used.

View file

@ -14,6 +14,14 @@ Shipyard needs to take special actions or ensure the intended outcome:
`--termtasks` option with `jobs del`
The following are general limitations or restrictions:
* SSH tunnel script generation is only compatible with non-Windows machines.
* Data movement support on Windows is restricted to scp. Both `ssh.exe` and
`scp.exe` must be found through `%PATH%` or in the current working directory.
`rsync` is not supported on Windows.
* `pool ssh` support in Windows is only available if `ssh.exe` is found
through `%PATH%` or is in the current working directory.
* Credential encryption support in Windows is available only if `openssl.exe`
is found through `%PATH%` or is in the current working directory.
* Compute pool resize down (i.e., removing nodes from a pool) is not supported
when peer-to-peer transfer is enabled.
* The maximum number of compute nodes with peer-to-peer enabled is currently

View file