Add GroupInfo and RegistryInfo classes. Parse the image and ports keys, and add a Kubernetes class for deploying to a Kubernetes cluster.
This commit is contained in:
Родитель
52f7ef0ed0
Коммит
d82638adfe
|
@ -109,7 +109,7 @@ class ACSClient(object):
|
|||
"""
|
||||
if self.is_direct:
|
||||
url = '{}:{}/{}'.format(
|
||||
self.cluster_info.api_endpoint, self.cluster_info.get_api_endpoint_port(), path)
|
||||
self.cluster_info.api_endpoint, self.cluster_info.get_api_endpoint_port(), path.strip('/'))
|
||||
else:
|
||||
local_port = self._setup_tunnel_server()
|
||||
url = 'http://127.0.0.1:{}/{}'.format(str(local_port), path)
|
||||
|
@ -126,7 +126,7 @@ class ACSClient(object):
|
|||
raise Exception('Invalid method {}'.format(method))
|
||||
|
||||
method_to_call = getattr(requests, method)
|
||||
headers = {'content-type': 'application/json'}
|
||||
headers = {'Content-type': 'application/json'}
|
||||
|
||||
if not data:
|
||||
response = method_to_call(
|
||||
|
|
|
@ -4,7 +4,9 @@ import sys
|
|||
import traceback
|
||||
|
||||
import dockercomposeparser
|
||||
|
||||
from clusterinfo import ClusterInfo
|
||||
from registryinfo import RegistryInfo
|
||||
from groupinfo import GroupInfo
|
||||
|
||||
class VstsLogFormatter(logging.Formatter):
|
||||
error_format = logging.Formatter('##[error]%(message)s')
|
||||
|
@ -31,8 +33,10 @@ def get_arg_parser():
|
|||
|
||||
parser.add_argument('--compose-file',
|
||||
help='[required] Docker-compose.yml file')
|
||||
parser.add_argument('--dcos-master-url',
|
||||
help='DC/OS master URL')
|
||||
parser.add_argument('--api-endpoint-url',
|
||||
help='API endpoint URL')
|
||||
parser.add_argument('--orchestrator',
|
||||
help='Orchestrator type (DCOS or Kubernetes)')
|
||||
|
||||
parser.add_argument('--group-name',
|
||||
help='[required] Application group name')
|
||||
|
@ -103,14 +107,20 @@ def init_logger(verbose):
|
|||
if __name__ == '__main__':
|
||||
arguments = process_arguments()
|
||||
init_logger(arguments.verbose)
|
||||
|
||||
cluster_info = ClusterInfo(
|
||||
arguments.acs_host, arguments.acs_port, arguments.acs_username, arguments.acs_password,
|
||||
arguments.acs_private_key, arguments.api_endpoint_url, arguments.orchestrator)
|
||||
|
||||
registry_info = RegistryInfo(
|
||||
arguments.registry_host, arguments.registry_username, arguments.registry_password)
|
||||
|
||||
group_info = GroupInfo(arguments.group_name, arguments.group_qualifier, arguments.group_version)
|
||||
|
||||
try:
|
||||
with dockercomposeparser.DockerComposeParser(
|
||||
arguments.compose_file, arguments.dcos_master_url, arguments.acs_host,
|
||||
arguments.acs_port, arguments.acs_username, arguments.acs_password,
|
||||
arguments.acs_private_key, arguments.group_name, arguments.group_qualifier,
|
||||
arguments.group_version, arguments.registry_host, arguments.registry_username,
|
||||
arguments.registry_password, arguments.minimum_health_capacity,
|
||||
check_dcos_version=True) as compose_parser:
|
||||
arguments.compose_file, cluster_info, registry_info, group_info,
|
||||
arguments.minimum_health_capacity) as compose_parser:
|
||||
compose_parser.deploy()
|
||||
sys.exit(0)
|
||||
except Exception as deployment_exc:
|
||||
|
|
|
@ -1,45 +1,32 @@
|
|||
import hashlib
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import json
|
||||
|
||||
import yaml
|
||||
|
||||
import acsclient
|
||||
import acsinfo
|
||||
import dockerregistry
|
||||
import marathon
|
||||
import portmappings
|
||||
from kubernetes import Kubernetes
|
||||
import serviceparser
|
||||
|
||||
|
||||
class DockerComposeParser(object):
|
||||
def __init__(self, compose_file, master_url, acs_host, acs_port, acs_username,
|
||||
acs_password, acs_private_key, group_name, group_qualifier, group_version,
|
||||
registry_host, registry_username, registry_password,
|
||||
minimum_health_capacity, check_dcos_version=False):
|
||||
def __init__(self, compose_file, cluster_info, registry_info, group_info,
|
||||
minimum_health_capacity):
|
||||
|
||||
self.cleanup_needed = False
|
||||
self._ensure_docker_compose(compose_file)
|
||||
with open(compose_file, 'r') as compose_stream:
|
||||
self.compose_data = yaml.load(compose_stream)
|
||||
|
||||
self.acs_info = acsinfo.AcsInfo(acs_host, acs_port, acs_username,
|
||||
acs_password, acs_private_key, master_url)
|
||||
|
||||
self.group_name = group_name
|
||||
self.group_qualifier = group_qualifier
|
||||
self.group_version = group_version
|
||||
|
||||
self.registry_host = registry_host
|
||||
self.registry_username = registry_username
|
||||
self.registry_password = registry_password
|
||||
self.cluster_info = cluster_info
|
||||
self.registry_info = registry_info
|
||||
self.group_info = group_info
|
||||
|
||||
self.minimum_health_capacity = minimum_health_capacity
|
||||
|
||||
self.acs_client = acsclient.ACSClient(self.acs_info)
|
||||
if check_dcos_version:
|
||||
self.acs_client.ensure_dcos_version()
|
||||
self.acs_client = acsclient.ACSClient(self.cluster_info)
|
||||
self.kubernetes = Kubernetes(self.acs_client)
|
||||
|
||||
def __enter__(self):
|
||||
"""
|
||||
|
@ -90,233 +77,24 @@ class DockerComposeParser(object):
|
|||
docker_compose_file,
|
||||
docker_compose_expected_version))
|
||||
|
||||
def _get_hash(self, str):
|
||||
"""
|
||||
Gets the hashed string
|
||||
"""
|
||||
hash_value = hashlib.sha1(str)
|
||||
digest = hash_value.hexdigest()
|
||||
return digest
|
||||
|
||||
def _get_group_id(self, include_version=True):
|
||||
"""
|
||||
Gets the group id.
|
||||
<group_name>.<first 8 chars of SHA-1 hash of group qualifier>.<group_version>
|
||||
"""
|
||||
hash_qualifier = self._get_hash(self.group_qualifier)[:8]
|
||||
|
||||
if include_version:
|
||||
return '/{}.{}.{}'.format(self.group_name, hash_qualifier, self.group_version)
|
||||
|
||||
return '/{}.{}.'.format(self.group_name, hash_qualifier)
|
||||
|
||||
def _get_vip_name(self, service_name):
|
||||
"""
|
||||
Gets the vip name that includes hashed group name, qualifier,
|
||||
version and service name
|
||||
"""
|
||||
qualifier_hash = self._get_hash(self.group_qualifier)
|
||||
return '{}.{}'.format(self._get_hash(self.group_name + qualifier_hash)[:8],
|
||||
service_name)
|
||||
|
||||
def _parse_compose(self):
|
||||
"""
|
||||
Parses the docker-compose file and returns the initial marathon.json file
|
||||
"""
|
||||
group_name = self._get_group_id()
|
||||
all_apps = {'id': group_name, 'apps': []}
|
||||
group_name = self.group_info.get_id()
|
||||
all_deployments =[]
|
||||
|
||||
self.nginx_helper.ensure_exists(self.compose_data)
|
||||
docker_registry = dockerregistry.DockerRegistry(
|
||||
self.registry_host, self.registry_username, self.registry_password,
|
||||
self.marathon_helper)
|
||||
registry_secret = self.registry_info.create_secret_json()
|
||||
self.kubernetes.create_secret(registry_secret)
|
||||
|
||||
for service_name, service_info in self.compose_data['services'].items():
|
||||
# Get the app_json for the service
|
||||
service_parser = serviceparser.Parser(group_name, service_name, service_info)
|
||||
app_json = service_parser.get_app_json()
|
||||
service_parser = serviceparser.Parser(
|
||||
self.group_info, self.registry_info, service_name, service_info)
|
||||
deployment_json = service_parser.get_deployment_json()
|
||||
all_deployments.append(deployment_json)
|
||||
|
||||
# Add the registry auth URL if needed
|
||||
registry_auth_url = docker_registry.get_registry_auth_url()
|
||||
if registry_auth_url:
|
||||
app_json['uris'] = [registry_auth_url]
|
||||
all_apps['apps'].append(app_json)
|
||||
|
||||
return all_apps
|
||||
|
||||
def _predeployment_check(self):
|
||||
"""
|
||||
Checks if services can be deployed and
|
||||
returns True if services are being updated or
|
||||
False if this is the first deployment
|
||||
"""
|
||||
group_id = self._get_group_id(include_version=False)
|
||||
group_version_id = self._get_group_id()
|
||||
group_ids = self.marathon_helper.get_group_ids(group_id)
|
||||
group_count = len(group_ids)
|
||||
is_update = False
|
||||
existing_group_id = None
|
||||
|
||||
if group_count > 1:
|
||||
raise Exception(
|
||||
'Another deployment is already in progress')
|
||||
|
||||
if group_count == 1:
|
||||
# Do an additional check that includes the group version
|
||||
groups_with_version = self.marathon_helper.get_group_ids(group_version_id)
|
||||
|
||||
# Check if there's an existing group with the same version_id
|
||||
if len(groups_with_version) > 0:
|
||||
if group_version_id == groups_with_version[0]:
|
||||
raise Exception(
|
||||
'App with the same version already deployed')
|
||||
else:
|
||||
existing_group_id = group_ids[0]
|
||||
is_update = True
|
||||
|
||||
return is_update, existing_group_id
|
||||
|
||||
def _find_app_by_name(self, app_name, deployment_json):
|
||||
"""
|
||||
Finds the app object in Marathon json
|
||||
"""
|
||||
existing_app = None
|
||||
if not deployment_json or not 'apps' in deployment_json:
|
||||
return existing_app
|
||||
|
||||
for app in sorted(deployment_json['apps']):
|
||||
# Gets the app name from the full id
|
||||
existing_app_name = app['id'].split('/')[-1]
|
||||
if existing_app_name.lower() == app_name.lower():
|
||||
existing_app = app
|
||||
break
|
||||
return existing_app
|
||||
|
||||
def _create_or_update_private_ips(self, deployment_json, new_group_id):
|
||||
"""
|
||||
Goes through the deployment json and uses 'servicePort' to
|
||||
create a new private IP.
|
||||
"""
|
||||
private_ips = {}
|
||||
if not deployment_json or not 'apps' in deployment_json:
|
||||
return private_ips
|
||||
|
||||
new_group_id = new_group_id.rstrip('/')
|
||||
for app in deployment_json['apps']:
|
||||
app_id = app['id']
|
||||
|
||||
# Get the app name only, ignoring the group and everything else
|
||||
app_name = app_id.rstrip('/').split('/')[-1]
|
||||
new_id = '{}/{}'.format(new_group_id, app_name)
|
||||
|
||||
try:
|
||||
port_mappings = app['container']['docker']['portMappings']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if port_mappings is None:
|
||||
continue
|
||||
|
||||
if not len(port_mappings):
|
||||
continue
|
||||
|
||||
# Always get the first portMapping and use it to create the private IP
|
||||
port_mapping = port_mappings[0]
|
||||
port = int(port_mapping['servicePort'])
|
||||
x, y = divmod(port - 10000, 1<<8)
|
||||
ip = '10.64.' + str(x) + '.' + str(y)
|
||||
private_ips[str(new_id)] = ip
|
||||
logging.info('Creating new private IP "%s" for service "%s"', ip, new_id)
|
||||
|
||||
return private_ips
|
||||
|
||||
def _update_port_mappings(self, marathon_app, private_ips, service_info, vip_name):
|
||||
"""
|
||||
Updates portMappings in marathon_app for the service defined with service_info
|
||||
"""
|
||||
marathon_app_id = marathon_app['id']
|
||||
|
||||
if not marathon_app_id in private_ips:
|
||||
return
|
||||
|
||||
ip_address = private_ips[marathon_app_id]
|
||||
port_mapping = self.portmappings_helper.get_port_mappings(
|
||||
ip_address,
|
||||
service_info,
|
||||
vip_name)
|
||||
marathon_app['container']['docker']['portMappings'] = port_mapping
|
||||
|
||||
def _add_dependencies(self, marathon_app, private_ips, service_info):
|
||||
"""
|
||||
Parses the 'depends_on' for service defined in service_info and
|
||||
uses vip_tuples to look-up dependency ids and updates the marathon_app.
|
||||
"""
|
||||
if 'depends_on' in service_info:
|
||||
for dependency in service_info['depends_on']:
|
||||
all_dependency_ids = [t for t in private_ips if t.endswith(dependency)]
|
||||
if len(all_dependency_ids) > 0:
|
||||
# Check if the dependency is already added, before
|
||||
# adding it, so we don't get dupes
|
||||
exists = [d for d in marathon_app['dependencies'] if d.lower() == all_dependency_ids[0]]
|
||||
if len(exists) == 0:
|
||||
marathon_app['dependencies'].append(all_dependency_ids[0])
|
||||
|
||||
def _add_host(self, marathon_app, app_id, private_ips, alias=None):
|
||||
"""
|
||||
Adds a host entry ('add-host') to marathon_app in case it does not exist yet
|
||||
"""
|
||||
created_vip = private_ips[app_id]
|
||||
if ':' in created_vip:
|
||||
split = created_vip.split(':')
|
||||
created_vip = split[0]
|
||||
|
||||
if alias:
|
||||
host_value = alias + ':' + created_vip
|
||||
else:
|
||||
host_value = app_id.split('/')[-1] + ':' + created_vip
|
||||
|
||||
# If host does not exist yet, we add it
|
||||
if len([a for a in marathon_app['container']['docker']['parameters'] \
|
||||
if a['value'] == host_value]) == 0:
|
||||
marathon_app['container']['docker']['parameters'].append(
|
||||
{'key': 'add-host', 'value': host_value})
|
||||
|
||||
def _add_hosts(self, all_apps, marathon_app, private_ips):
|
||||
"""
|
||||
Creates 'add-host' entries for the marathon_app by adding
|
||||
VIPs of all other services to 'add-host'
|
||||
"""
|
||||
marathon_app_id = marathon_app['id']
|
||||
for app_id in private_ips:
|
||||
if not app_id.endswith(marathon_app_id.split('/')[-1]):
|
||||
if self._has_private_ip(all_apps, app_id):
|
||||
self._add_host(marathon_app, app_id, private_ips)
|
||||
|
||||
def _has_private_ip(self, all_apps, app_id):
|
||||
"""
|
||||
Checks if app_id in all_apps contains at least
|
||||
one port mapping with VIP_0 set
|
||||
"""
|
||||
apps = [app for app in all_apps \
|
||||
if app['id'].lower() == app_id.lower()]
|
||||
if len(apps) <= 0:
|
||||
return False
|
||||
app = apps[0]
|
||||
|
||||
try:
|
||||
port_mappings = app['container']['docker']['portMappings']
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
if port_mappings is None:
|
||||
return False
|
||||
|
||||
for port_mapping in port_mappings:
|
||||
if not 'labels' in port_mapping:
|
||||
continue
|
||||
if 'VIP_0' in port_mapping['labels']:
|
||||
return True
|
||||
return False
|
||||
return all_deployments
|
||||
|
||||
def _cleanup(self):
|
||||
"""
|
||||
|
@ -327,9 +105,9 @@ class DockerComposeParser(object):
|
|||
return
|
||||
|
||||
try:
|
||||
group_id = self._get_group_id()
|
||||
group_id = self.group_info.get_id()
|
||||
logging.info('Removing "%s".', group_id)
|
||||
self.marathon_helper.delete_group(group_id)
|
||||
# self.marathon_helper.delete_group(group_id)
|
||||
except Exception as remove_exception:
|
||||
raise remove_exception
|
||||
finally:
|
||||
|
@ -339,137 +117,8 @@ class DockerComposeParser(object):
|
|||
"""
|
||||
Deploys the services defined in docker-compose.yml file
|
||||
"""
|
||||
is_update, existing_group_id = self._predeployment_check()
|
||||
all_deployments = self._parse_compose()
|
||||
|
||||
# marathon_json is the instance we are working with and deploying
|
||||
marathon_json = self._parse_compose()
|
||||
|
||||
# 1. Deploy the initial marathon_json file (instances = 0, no VIPs)
|
||||
self.marathon_helper.deploy_group(marathon_json)
|
||||
|
||||
# At this point we need to clean up if anything
|
||||
# goes wrong
|
||||
self.cleanup_needed = True
|
||||
|
||||
group_id = self._get_group_id()
|
||||
if not self.marathon_helper.is_group_id_unique(group_id):
|
||||
raise Exception(
|
||||
'App with ID "{}" is not unique anymore'.format(group_id))
|
||||
|
||||
new_deployment_json = self.marathon_helper.get_group(group_id)
|
||||
|
||||
# Create the VIPs from servicePorts for apps we dont have the VIPs for yet
|
||||
private_ips = self._create_or_update_private_ips(new_deployment_json, group_id)
|
||||
|
||||
# Go through the docker-compose file and update the corresponding marathon_app with
|
||||
# portMappings, VIP, color and links
|
||||
for service_name, service_info in self.compose_data['services'].items():
|
||||
# Get the corresponding marathon JSON for the service in docker-compose file
|
||||
marathon_app = [app for app in marathon_json['apps'] \
|
||||
if app['id'].endswith('/' + service_name)][0]
|
||||
|
||||
logging.info('Updating port mappings for "%s"', marathon_app['id'])
|
||||
self._update_port_mappings(
|
||||
marathon_app,
|
||||
private_ips,
|
||||
service_info,
|
||||
self._get_vip_name(service_name))
|
||||
|
||||
# Handles the 'depends_on' key in docker-compose and adds any
|
||||
# dependencies to the dependencies list
|
||||
self._add_dependencies(marathon_app, private_ips, service_info)
|
||||
|
||||
for service_name, service_info in self.compose_data['services'].items():
|
||||
# Get the corresponding marathon JSON for the service in docker-compose file
|
||||
marathon_app = [app for app in marathon_json['apps'] \
|
||||
if app['id'].endswith('/' + service_name)][0]
|
||||
|
||||
# Add hosts (VIPs) for all services, except the current one
|
||||
self._add_hosts(marathon_json['apps'], marathon_app, private_ips)
|
||||
|
||||
# Update the dependencies and add 'add-host' entries for each link in the service
|
||||
if 'links' in service_info:
|
||||
for link_name in service_info['links']:
|
||||
link_service_name = link_name
|
||||
link_alias = link_name
|
||||
if ':' in link_name:
|
||||
# Split the link into service and alias
|
||||
link_service_name = link_name.split(':')[0]
|
||||
link_alias = link_name.split(':')[1]
|
||||
|
||||
# Get the VIP for the linked service
|
||||
link_id = [t for t in private_ips if t.endswith(link_service_name)][0]
|
||||
# Make sure app with name link_id has a VIP_0
|
||||
|
||||
if not self._has_private_ip(marathon_json['apps'], link_id):
|
||||
raise Exception(
|
||||
"Can't link '{}' to '{}'. '{}' doesn't expose any ports"
|
||||
.format(service_name, link_service_name, link_service_name))
|
||||
|
||||
self._add_host(marathon_app, link_id, private_ips, alias=link_alias)
|
||||
logging.info('Adding dependency "%s" to "%s"', link_id, service_name)
|
||||
marathon_app['dependencies'].append(link_id)
|
||||
|
||||
# Update the group with VIPs
|
||||
self.marathon_helper.update_group(marathon_json)
|
||||
|
||||
# 3. Update the instances and do the final deployment
|
||||
if is_update:
|
||||
existing_deployment_json = self.marathon_helper.get_group(existing_group_id)
|
||||
|
||||
# Get the number of instances for deployed services
|
||||
target_service_instances = {}
|
||||
for app in existing_deployment_json['apps']:
|
||||
full_app_id = app['id']
|
||||
# Just get the service name (e.g. service-a)
|
||||
app_id = full_app_id.split('/')[-1]
|
||||
new_apps = [a for a in new_deployment_json['apps'] \
|
||||
if a['id'].split('/')[-1] == app_id]
|
||||
new_app = new_apps[0]
|
||||
|
||||
# Store the new app ID and the instances of existing app
|
||||
# so we can easily look it up when scaling
|
||||
target_service_instances[new_app['id']] = app['instances']
|
||||
|
||||
for app in new_deployment_json['apps']:
|
||||
app_id = app['id']
|
||||
# Calculate the new instances for each service
|
||||
marathon_app = [app for app in marathon_json['apps'] \
|
||||
if app['id'] == app_id][0]
|
||||
if not app_id in target_service_instances:
|
||||
target_service_instances[app_id] = 1
|
||||
instance_count = math.ceil(
|
||||
(target_service_instances[app_id] * self.minimum_health_capacity) / 100)
|
||||
logging.info('Setting instances for app "%s" to %s',
|
||||
marathon_app['id'], instance_count)
|
||||
marathon_app['instances'] = instance_count
|
||||
|
||||
scale_factor = float(self.minimum_health_capacity)/100
|
||||
logging.info('Scale deployment "%s" by factor %s', existing_group_id, scale_factor)
|
||||
self.marathon_helper.scale_group(existing_group_id, scale_factor, log_failures=False)
|
||||
|
||||
logging.info('Update deployment "%s" with new instance counts', marathon_json['id'])
|
||||
self.marathon_helper.update_group(marathon_json)
|
||||
|
||||
# Scale the existing deployment instances to 0
|
||||
logging.info('Scale deployment "%s" by factor %s', existing_group_id, 0)
|
||||
self.marathon_helper.scale_group(existing_group_id, 0, log_failures=False)
|
||||
|
||||
# Scale up new deployment instances to target instance count
|
||||
for app in new_deployment_json['apps']:
|
||||
app_id = app['id']
|
||||
marathon_app = [app for app in marathon_json['apps'] \
|
||||
if app['id'] == app_id][0]
|
||||
logging.info('Setting instances for app "%s" to %s',
|
||||
marathon_app['id'], target_service_instances[app_id])
|
||||
marathon_app['instances'] = target_service_instances[app_id]
|
||||
|
||||
logging.info('Scale instances in deployment "%s" to target count', marathon_json['id'])
|
||||
self.marathon_helper.update_group(marathon_json)
|
||||
|
||||
logging.info('Delete deployment "%s"', existing_group_id)
|
||||
self.marathon_helper.delete_group(existing_group_id)
|
||||
else:
|
||||
for app in marathon_json['apps']:
|
||||
app['instances'] = 1
|
||||
self.marathon_helper.update_group(marathon_json)
|
||||
for deployment in all_deployments:
|
||||
# logging.info(json.dumps(deployment))
|
||||
self.kubernetes.create_deployment(deployment)
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
import hashlib
|
||||
|
||||
class GroupInfo(object):
    """
    Holds info about the deployment group (name, qualifier and version)
    and computes the deployment id derived from them.
    """
    def __init__(self, group_name, group_qualifier, group_version):
        # name: application group name
        # qualifier: opaque string hashed into the group id
        # version: deployment version, appended to the id
        self.name = group_name
        self.qualifier = group_qualifier
        self.version = group_version

    def _get_hash(self, input_string):
        """
        Gets the SHA-1 hex digest of the provided string.
        """
        # hashlib.sha1 requires bytes: passing a str raises a
        # TypeError on Python 3, so encode first.
        if isinstance(input_string, str):
            input_string = input_string.encode('utf-8')
        hash_value = hashlib.sha1(input_string)
        digest = hash_value.hexdigest()
        return digest

    def get_id(self, include_version=True):
        """
        Gets the group id.
        <group_name>.<first 8 chars of SHA-1 hash of group qualifier>.<group_version>

        When include_version is False the trailing version segment is left
        empty (the id still ends with a '.'), which callers use as a prefix
        to match every version of the group.
        """
        hash_qualifier = self._get_hash(self.qualifier)[:8]

        if include_version:
            return '{}.{}.{}'.format(self.name, hash_qualifier, self.version)

        return '{}.{}.'.format(self.name, hash_qualifier)
|
|
@ -0,0 +1,48 @@
|
|||
class Kubernetes(object):
    """
    Class used for working with Kubernetes API

    Thin wrapper that prefixes Kubernetes API paths with the right
    endpoint and delegates the HTTP work to the ACS client.
    """

    def __init__(self, acs_client):
        # Client that performs the actual HTTP calls against the cluster.
        self.acs_client = acs_client

    def get_request(self, path, endpoint='api/v1'):
        """
        Makes an HTTP GET request
        """
        target = '{}/{}'.format(endpoint, path)
        return self.acs_client.get_request(target)

    def delete_request(self, path, endpoint='api/v1'):
        """
        Makes an HTTP DELETE request
        """
        target = '{}/{}'.format(endpoint, path)
        return self.acs_client.delete_request(target)

    def post_request(self, path, post_data, endpoint='api/v1'):
        """
        Makes an HTTP POST request
        """
        target = '{}/{}'.format(endpoint, path)
        return self.acs_client.post_request(target, post_data=post_data)

    def put_request(self, path, put_data=None, endpoint='api/v1', **kwargs):
        """
        Makes an HTTP PUT request
        """
        target = '{}/{}'.format(endpoint, path)
        return self.acs_client.put_request(target, put_data=put_data, **kwargs)

    def create_secret(self, secret_json, namespace='default'):
        """
        Creates a secret on Kubernetes
        """
        secrets_path = 'namespaces/{}/secrets'.format(namespace)
        return self.post_request(secrets_path, post_data=secret_json)

    def create_deployment(self, deployment_json, namespace='default'):
        """
        Creates a deployment on Kubernetes
        """
        # Deployments live under the extensions API group, not core v1.
        endpoint = '/apis/extensions/v1beta1'
        deployments_path = 'namespaces/{}/deployments'.format(namespace)
        return self.post_request(deployments_path, post_data=deployment_json, endpoint=endpoint)
|
@ -0,0 +1,42 @@
|
|||
import base64
|
||||
import json
|
||||
|
||||
|
||||
class RegistryInfo(object):
    """
    Holds info about the Docker registry
    """

    def __init__(self, host, username, password):
        # host: registry hostname; also used as the secret name
        # username / password: registry credentials
        self.host = host
        self.username = username
        self.password = password

    def create_secret_json(self):
        """
        Creates the JSON with Kubernetes secret object

        Returns a JSON string describing a kubernetes.io/dockerconfigjson
        secret named after the registry host.
        """
        return json.dumps({
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {
                "name": self.host
            },
            "data": {
                ".dockerconfigjson": self._get_encoded_config()
            },
            "type": "kubernetes.io/dockerconfigjson"
        })

    def _get_encoded_config(self):
        """
        Gets the config.json contents as a base64 encoded string
        """
        # base64.b64encode requires bytes and returns bytes on Python 3;
        # encode the inputs and decode the results back to str so the
        # value is JSON-serializable.
        auth = base64.b64encode(
            '{}:{}'.format(self.username, self.password).encode('utf-8')).decode('ascii')
        config = {
            "auths": {
                self.host: {
                    "auth": auth
                }
            }
        }
        return base64.b64encode(json.dumps(config).encode('utf-8')).decode('ascii')
|
|
@ -0,0 +1,258 @@
|
|||
import logging
|
||||
import pipes
|
||||
import re
|
||||
import json
|
||||
|
||||
|
||||
class Parser(object):
    """
    Converts a single docker-compose service entry into a Kubernetes
    Deployment JSON document.

    Each supported docker-compose key is handled by a method named
    _parse_<key> (e.g. 'image' -> _parse_image, 'ports' -> _parse_ports);
    keys without a matching method are silently ignored.
    """

    def __init__(self, group_info, registry_info, service_name, service_info):
        # group_info: deployment group metadata (name/qualifier/version, get_id())
        # registry_info: Docker registry info; its host names the image pull secret
        # service_name: name of the service in the docker-compose file
        # service_info: dict with the service's keys from docker-compose
        self.service_name = service_name
        self.service_info = service_info
        self.registry_info = registry_info
        self.group_info = group_info
        self.deployment_json = {}

    def _add_label(self, name, value):
        """
        Adds a label to deployment JSON
        """
        self.deployment_json['spec']['template'][
            'metadata']['labels'][name] = value

    def _add_container(self, name, image):
        """
        Adds a container with name and image to the JSON
        """
        self.deployment_json['spec']['template']['spec']['containers'].append({
            'name': name,
            'image': image
        })

    def _add_image_pull_secret(self, name):
        """
        Adds image pull secret to the deployment JSON
        """
        self.deployment_json['spec']['template']['spec']['imagePullSecrets'].append({
            'name': name})

    def _add_container_port(self, container_port):
        """
        Adds a container port
        """
        # TODO: Do we always grab the first container? Or do we need
        # to pass in the name of the container to find the right one

        if not 'ports' in self.deployment_json['spec']['template']['spec']['containers'][0]:
            self.deployment_json['spec']['template']['spec']['containers'][0]['ports'] = []

        self.deployment_json['spec']['template']['spec']['containers'][0]['ports'].append({
            'containerPort': container_port})

    def get_deployment_json(self):
        """
        Builds and returns the Deployment JSON (as a string) for the
        service in docker-compose: group/service labels, the image pull
        secret, then one _parse_<key> pass per service key.
        """
        self.deployment_json = self._get_empty_deployment_json()
        self._add_label('group_name', self.group_info.name)
        self._add_label('group_qualifier', self.group_info.qualifier)
        self._add_label('group_version', self.group_info.version)
        self._add_label('group_id', self.group_info.get_id())
        self._add_label('service_name', self.service_name)

        self._add_image_pull_secret(self.registry_info.host)

        for key in self.service_info:
            # We look for the method named _parse_{key} (for example: _parse_ports)
            # and execute it to parse that key.
            # If we decide to support more stuff from docker-compose, we
            # can simply add a method named _parse_NEWKEY and implement how the
            # key translates to Marathon JSON.
            method_name = '_parse_{}'.format(key)
            if hasattr(self, method_name):
                logging.info('Parsing key "%s"', key)
                method_to_call = getattr(self, method_name)
                method_to_call(key)
        return json.dumps(self.deployment_json)

    def _parse_image(self, key):
        """
        Parses the 'image' key
        """
        if key in self.service_info:
            self._add_container(self.service_name, self.service_info[key])

    def _parse_ports(self, key):
        """
        Parses the 'ports' key
        """
        if key in self.service_info:
            internal_ports = self._parse_internal_ports()
            for port_tuple in internal_ports:
                # TODO: What do we do with host port???
                self._add_container_port(port_tuple[1])

    def _parse_private_ports(self):
        """
        Parses the 'expose' key in the docker-compose file and returns a
        list of tuples with port numbers. These tuples are used
        to create portMappings (blue/green only) in the marathon.json file

        NOTE(review): not referenced within this class — confirm callers
        elsewhere before removing.
        """
        port_tuple_list = []

        if 'expose' not in self.service_info:
            return port_tuple_list

        for port_entry in self.service_info['expose']:
            if self._is_number(port_entry):
                port_tuple_list.append((int(port_entry), int(port_entry)))
            else:
                # NOTE(review): ValueError is given two args here, so the
                # '%s' placeholder is never actually formatted into the message.
                raise ValueError(
                    'Port number "%s" is not a valid number', port_entry)
        return port_tuple_list

    def _parse_internal_ports(self):
        """
        Parses the 'ports' key in the docker-compose file and returns a list of
        tuples with port numbers. These tuples are used to create
        portMappings (blue/green and cyan) in the marathon.json file

        Accepted entry forms: "8080:9090", "8080-8090:9080-9090",
        "3000-3005", "3000" (plain numbers may be ints).
        """
        port_tuple_list = []

        if 'ports' not in self.service_info:
            return port_tuple_list

        for port_entry in self.service_info['ports']:
            if ':' in str(port_entry):
                split = port_entry.split(':')
                vip_port = split[0]
                container_port = split[1]
                if self._is_port_range(vip_port) and self._is_port_range(container_port):
                    # "8080-8090:9080-9090"
                    if self._are_port_ranges_same_length(vip_port, container_port):
                        vip_start, vip_end = self._split_port_range(vip_port)
                        container_start, container_end = self._split_port_range(
                            container_port)
                        # vp = vip_port, cp = container_port; we do +1 on the end range to
                        # include the last port as well
                        for vp, cp in zip(range(vip_start, vip_end + 1), range(container_start, container_end + 1)):
                            port_tuple_list.append((int(vp), int(cp)))
                    else:
                        # NOTE(review): message placeholders are never formatted
                        # (two-arg ValueError), see _parse_private_ports.
                        raise ValueError('Port ranges "{}" and "{}" are not equal in length',
                                         vip_port, container_port)
                else:
                    # "8080:8080"
                    if self._is_number(vip_port) and self._is_number(container_port):
                        port_tuple_list.append(
                            (int(vip_port), int(container_port)))
                    else:
                        # e.g. invalid entry: 8080-8082:9000
                        raise ValueError(
                            'One of the ports is not a valid number or a valid range')
            else:
                if self._is_port_range(port_entry):
                    # "3000-3005"
                    range_start, range_end = self._split_port_range(port_entry)
                    for i in range(range_start, range_end + 1):
                        port_tuple_list.append((i, i))
                else:
                    # "3000"
                    if self._is_number(port_entry):
                        port_tuple_list.append(
                            (int(port_entry), int(port_entry)))
                    else:
                        raise ValueError(
                            'One of the ports is not a valid number')
        return port_tuple_list

    def _is_number(self, input_str):
        """
        Checks if the string is a number or not
        """
        try:
            int(input_str)
            return True
        except ValueError:
            return False

    def _is_port_range(self, port_entry):
        """
        Checks if the provided string is a port entry or not

        A port range is two numbers joined by exactly one '-'.
        """
        if not port_entry:
            return False

        if '-' in str(port_entry) and str(port_entry).count('-') == 1:
            split = port_entry.split('-')
            first_part = split[0]
            second_part = split[1]
            return self._is_number(first_part) and self._is_number(second_part)
        return False

    def _split_port_range(self, port_range):
        """
        Splits a port range and returns a tuple with start and end port
        """
        if not self._is_port_range(port_range):
            raise ValueError(
                'Provided value "%s" is not a port range', port_range)
        split = port_range.split('-')
        return (int(split[0]), int(split[1]))

    def _are_port_ranges_same_length(self, first_range, second_range):
        """
        Checks if two port ranges are the same length
        """

        if not self._is_port_range(first_range) or not self._is_port_range(second_range):
            raise ValueError(
                'At least one of the provided values is not a port range')

        first_split_start, first_split_end = self._split_port_range(
            first_range)
        second_split_start, second_split_end = self._split_port_range(
            second_range)

        return len(range(first_split_start, first_split_end)) == len(range(second_split_start, second_split_end))

    def _get_empty_deployment_json(self):
        """
        Returns the skeleton extensions/v1beta1 Deployment dict (one
        replica, empty labels/containers/imagePullSecrets) that the
        _add_* helpers fill in.
        """
        deployment_json = {
            "apiVersion": "extensions/v1beta1",
            "kind": "Deployment",
            "metadata": {
                "name": self.service_name
            },
            "spec": {
                "replicas": 1,
                "template": {
                    "metadata": {
                        "labels": {
                        }
                    },
                    "spec": {
                        "containers": [],
                        "imagePullSecrets": []
                    }
                }
            }
        }

        return deployment_json

    def _to_quoted_string(self, args):
        """
        Converts arguments to a properly quoted string

        NOTE(review): not referenced within this class — presumably kept
        for command/entrypoint parsing; confirm before removing.
        """
        cmd_string = ''

        if not args:
            return args

        if isinstance(args, list):
            for arg in args:
                cmd_string += pipes.quote(str(arg)) + ' '
        else:
            cmd_string = pipes.quote(args)
        return cmd_string.strip()
Загрузка…
Ссылка в новой задаче