#!/usr/bin/env python3

# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

# stdlib imports
import argparse
import copy
import datetime
import json
import pathlib
import subprocess
import sys
# non-stdlib imports
import azure.storage.table as azuretable

# global defines
_PARTITION_KEY = None
_TABLE_NAME = None


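# NOTE: _PARTITION_KEY is populated as '<batch account>$<pool id>' and
# _TABLE_NAME as '<storage_entity_prefix>perf' (defaulting to 'shipyardperf')
# by _create_credentials() below, before any table queries are issued.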
def _create_credentials(config: dict) -> azuretable.TableService:
    """Create an authenticated table client
    :param dict config: configuration dict
    :rtype: azure.storage.table.TableService
    :return: table client
    """
    global _PARTITION_KEY, _TABLE_NAME
    _PARTITION_KEY = '{}${}'.format(
        config['credentials']['batch']['account'],
        config['pool_specification']['id'])
    try:
        sep = config['batch_shipyard']['storage_entity_prefix']
        if sep is None or len(sep) == 0:
            raise KeyError()
    except KeyError:
        sep = 'shipyard'
    _TABLE_NAME = sep + 'perf'
    ssel = config['batch_shipyard']['storage_account_settings']
    table_client = azuretable.TableService(
        account_name=config['credentials']['storage'][ssel]['account'],
        account_key=config['credentials']['storage'][ssel]['account_key'],
        endpoint_suffix=config['credentials']['storage'][ssel]['endpoint'])
    return table_client


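# NOTE: _compute_delta_t prefers a 'diff' value recorded directly in the end
# event's message; if that is absent it falls back to subtracting the two
# event timestamps (RowKey-derived datetimes) and returns seconds as a float.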
def _compute_delta_t(
        data: dict, nodeid: str, event1: str, event1_pos: int, event2: str,
        event2_pos: int) -> float:
    """Compute time delta between two events
    :param dict data: data
    :param str nodeid: node id
    :param str event1: event1
    :param int event1_pos: event1 position in stream
    :param str event2: event2
    :param int event2_pos: event2 position in stream
    :rtype: float
    :return: delta t of events
    """
    # attempt to get directly recorded diff
    try:
        return data[nodeid][event2][event2_pos]['message']['diff']
    except (TypeError, KeyError):
        return (data[nodeid][event2][event2_pos]['timestamp'] -
                data[nodeid][event1][event1_pos]['timestamp']).total_seconds()


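# NOTE: event messages are flat 'key=value' pairs joined by commas. An
# illustrative (not verbatim) cascade:pull-end payload might look like:
#   'img=myrepo/myimage:latest,diff=12.34,size=123.4 MB'
# where 'size' values carrying kB/MB/GB/TB units are normalized to bytes.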
def _parse_message(event: str, msg: str) -> dict:
    """Parse message
    :param str event: event
    :param str msg: message
    :rtype: dict
    :return: dict of message entries
    """
    parts = msg.split(',')
    m = {}
    for part in parts:
        tmp = part.split('=')
        if tmp[0] == 'size':
            if event == 'cascade:pull-end':
                sz = tmp[1].split()
                sz[0] = float(sz[0])
                if sz[1] == 'kB':
                    sz[0] *= 1024
                elif sz[1] == 'MB':
                    sz[0] *= 1024 * 1024
                elif sz[1] == 'GB':
                    sz[0] *= 1024 * 1024 * 1024
                elif sz[1] == 'TB':
                    sz[0] *= 1024 * 1024 * 1024 * 1024
                tmp[1] = sz[0]
            m[tmp[0]] = int(tmp[1])
        elif tmp[0] == 'nglobalresources':
            m[tmp[0]] = int(tmp[1])
        elif tmp[0] == 'diff':
            m[tmp[0]] = float(tmp[1])
        else:
            m[tmp[0]] = tmp[1]
    return m


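# NOTE: _diff_events pairs each start event with the end event for the same
# image and records the elapsed time under '<prefix><img>' in the timing
# dict, e.g. keys prefixed with 'pull:', 'save:', 'torrent:' or 'load:'.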
def _diff_events(
        data: dict, nodeid: str, event: str, end_event: str, timing: dict,
        prefix: str, sizes: dict=None) -> None:
    """Diff start and end event
    :param dict data: data
    :param str nodeid: node id
    :param str event: start event
    :param str end_event: end event
    :param dict timing: timing dict
    :param str prefix: prefix
    :param dict sizes: sizes dict
    """
    for i in range(0, len(data[nodeid][event])):
        # torrent start -> load start may not always exist due to pull
        if (event == 'cascade:torrent-start' and
                end_event == 'cascade:load-start' and
                end_event not in data[nodeid]):
            return
        # find end event for this img
        subevent = data[nodeid][event][i]
        img = subevent['message']['img']
        found = False
        for j in range(0, len(data[nodeid][end_event])):
            pei = data[
                nodeid][end_event][j]['message']['img']
            if pei == img:
                timing[prefix + img] = _compute_delta_t(
                    data, nodeid, event, i, end_event, j)
                if sizes is not None and img not in sizes:
                    try:
                        if event == 'cascade:load-start':
                            sizes[img] = data[
                                nodeid][event][j]['message']['size']
                        else:
                            sizes[img] = data[
                                nodeid][end_event][j]['message']['size']
                    except KeyError:
                        pass
                found = True
                break
        if not found and event != 'cascade:torrent-start':
            raise RuntimeError(
                'could not find corresponding event for {}:{}'.format(
                    event, img))


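# NOTE: each table entity is expected to carry NodeId, Event and Message
# properties, with RowKey holding the POSIX timestamp of the event; rows are
# filtered by PartitionKey ('<batch account>$<pool id>').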
def coalesce_data(table_client: azuretable.TableService) -> tuple:
    """Coalesce perf data from table
    :param azure.storage.table.TableService table_client: table client
    :rtype: tuple
    :return: (data, sizes, offer, sku)
    """
    print('graphing data from {} with pk={}'.format(
        _TABLE_NAME, _PARTITION_KEY))
    entities = table_client.query_entities(
        _TABLE_NAME, filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    data = {}
    # process events
    for ent in entities:
        nodeid = ent['NodeId']
        event = ent['Event']
        if nodeid not in data:
            data[nodeid] = {}
        if event not in data[nodeid]:
            data[nodeid][event] = []
        ev = {
            'timestamp': datetime.datetime.fromtimestamp(
                float(ent['RowKey'])),
        }
        try:
            ev['message'] = _parse_message(event, ent['Message'])
        except KeyError:
            ev['message'] = None
        data[nodeid][event].append(ev)
    del entities
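    # at this point data maps nodeid -> event name -> list of
    # {'timestamp': datetime, 'message': parsed dict or None} entries,
    # in the order returned by the table query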
    sizes = {}
    offer = None
    sku = None
    for nodeid in data:
        if offer is None:
            offer = data[nodeid]['nodeprep:start'][0]['message']['offer']
            sku = data[nodeid]['nodeprep:start'][0]['message']['sku']
        # calculate dt timings
        timing = {
            'nodeprep': _compute_delta_t(
                data, nodeid, 'nodeprep:start', 0, 'nodeprep:end', 0),
            'global_resources_loaded': _compute_delta_t(
                data, nodeid, 'cascade:start', 0, 'cascade:gr-done', 0),
        }
        try:
            timing['docker_install'] = _compute_delta_t(
                data, nodeid, 'nodeprep:start', 0, 'privateregistry:start', 0)
        except KeyError:
            # when no private registry setup exists, install time is
            # equivalent to nodeprep time
            timing['docker_install'] = timing['nodeprep']
        try:
            timing['private_registry_setup'] = _compute_delta_t(
                data, nodeid, 'privateregistry:start', 0,
                'privateregistry:end', 0)
        except KeyError:
            timing['private_registry_setup'] = 0
        try:
            timing['docker_shipyard_container_pull'] = _compute_delta_t(
                data, nodeid, 'shipyard:pull-start', 0,
                'shipyard:pull-end', 0)
        except KeyError:
            timing['docker_shipyard_container_pull'] = 0
        data[nodeid]['start'] = data[
            nodeid]['nodeprep:start'][0]['timestamp'].timestamp()
        data[nodeid].pop('nodeprep:start')
        data[nodeid].pop('nodeprep:end')
        data[nodeid].pop('privateregistry:start', None)
        data[nodeid].pop('privateregistry:end', None)
        data[nodeid].pop('shipyard:pull-start', None)
        data[nodeid].pop('shipyard:pull-end', None)
        data[nodeid].pop('cascade:start')
        data[nodeid].pop('cascade:gr-done')
        for event in data[nodeid]:
            # print(event, data[nodeid][event])
            if event == 'cascade:pull-start':
                _diff_events(
                    data, nodeid, event, 'cascade:pull-end', timing, 'pull:',
                    sizes)
            elif event == 'cascade:save-start':
                _diff_events(
                    data, nodeid, event, 'cascade:save-end', timing, 'save:',
                    sizes)
            elif event == 'cascade:torrent-start':
                _diff_events(
                    data, nodeid, event, 'cascade:load-start', timing,
                    'torrent:')
            elif event == 'cascade:load-start':
                _diff_events(
                    data, nodeid, event, 'cascade:load-end', timing,
                    'load:', sizes)
        data[nodeid].pop('cascade:pull-start', None)
        data[nodeid].pop('cascade:pull-end', None)
        data[nodeid].pop('cascade:save-start', None)
        data[nodeid].pop('cascade:save-end', None)
        data[nodeid].pop('cascade:torrent-start', None)
        data[nodeid].pop('cascade:load-start', None)
        data[nodeid].pop('cascade:load-end', None)
        data[nodeid]['timing'] = timing
    return data, sizes, offer, sku


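# NOTE: graph_data writes a whitespace-delimited .dat file (one row per node,
# columns as named in the header written below) plus a .plot script, then
# shells out to gnuplot to render the per-node timing breakdown into a .png.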
def graph_data(data: dict, sizes: dict, offer: str, sku: str):
    """Graph data via gnuplot
    :param dict data: timing data
    :param dict sizes: size data
    :param str offer: offer
    :param str sku: sku
    """
    print(sizes)
    # create data file
    dat_fname = _PARTITION_KEY.replace('$', '-') + '.dat'
    mintime = float(sys.maxsize)
    maxtime = 0.0
    rdata = {}
    for nodeid in data:
        start = data[nodeid]['start']
        if start in rdata:
            raise RuntimeError('cannot create reverse mapping')
        rdata[start] = nodeid
        if start < mintime:
            mintime = start
        if start > maxtime:
            maxtime = start
    print('nodeready variance:', maxtime - mintime)
    total_gr = 0
    total_ac = 0
    with open(dat_fname, 'w') as f:
        f.write(
            'NodePrepStartTime NodeId NodePrep+DockerInstall '
            'PrivateRegistrySetup ShipyardContainerPull GlobalResourcesLoad '
            'TotalPull TotalSave TotalLoad TotalTorrent\n')
        for start in sorted(rdata):
            nodeid = rdata[start]
            pull = 0
            save = 0
            load = 0
            torrent = 0
            for event in data[nodeid]['timing']:
                if event.startswith('pull:'):
                    pull += data[nodeid]['timing'][event]
                elif event.startswith('save:'):
                    save += data[nodeid]['timing'][event]
                elif event.startswith('load:'):
                    load += data[nodeid]['timing'][event]
                elif event.startswith('torrent:'):
                    torrent += data[nodeid]['timing'][event]
            acquisition = pull + torrent + load
            total_ac += acquisition
            print(nodeid, data[nodeid]['timing'])
            f.write(
                ('{0} {1} {2} {3} {4} {5} {6:.5f} {7:.5f} {8:.5f} '
                 '{9:.5f}\n').format(
                    datetime.datetime.fromtimestamp(start).strftime(
                        '%Y-%m-%d-%H:%M:%S.%f'),
                    nodeid,
                    data[nodeid]['timing']['docker_install'],
                    data[nodeid]['timing']['private_registry_setup'],
                    data[nodeid]['timing']['docker_shipyard_container_pull'],
                    data[nodeid]['timing']['global_resources_loaded'],
                    pull,
                    save,
                    load,
                    torrent)
            )
            total_gr += data[nodeid]['timing']['global_resources_loaded']
    print('total gr: {} avg: {}'.format(total_gr, total_gr / len(data)))
    print('total acq: {} avg: {}'.format(total_ac, total_ac / len(data)))
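    # NOTE: in the plot script below, each 'using 1:($3+$4+...)' series draws
    # boxes of cumulative column sums, so the overlapping boxes read as a
    # stacked breakdown of per-node phase timings vs. node prep start time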
    # create plot file
    plot_fname = _PARTITION_KEY.replace('$', '-') + '.plot'
    with open(plot_fname, 'w') as f:
        f.write('set terminal pngcairo enhanced transparent crop\n')
        f.write(
            ('set title "Shipyard Performance for {} ({} {})" '
             'font ", 10" \n').format(
                _PARTITION_KEY.split('$')[-1], offer, sku))
        f.write(
            'set key top right horizontal autotitle columnhead '
            'font ", 7"\n')
        f.write('set xtics rotate by 45 right font ", 7"\n')
        f.write('set ytics font ", 8"\n')
        f.write('set xlabel "Node Prep Start Time" font ", 8"\n')
        f.write('set ylabel "Seconds" font ", 8"\n')
        f.write('set format x "%H:%M:%.3S"\n')
        f.write('set xdata time\n')
        f.write('set timefmt "%Y-%m-%d-%H:%M:%S"\n')
        f.write('set style fill solid\n')
        f.write('set boxwidth {0:.5f} absolute\n'.format(
            (maxtime - mintime) / 100.0))
        f.write('plot "{}" using 1:($3+$4+$5+$6) with boxes, \\\n'.format(
            dat_fname))
        f.write('\t"" using 1:($3+$4+$5) with boxes, \\\n')
        f.write('\t"" using 1:($3+$4) with boxes, \\\n')
        f.write('\t"" using 1:3 with boxes\n')
    png_fname = _PARTITION_KEY.replace('$', '-') + '.png'
    subprocess.check_call(
        'gnuplot {} > {}'.format(plot_fname, png_fname), shell=True)


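# NOTE: an illustrative merge_dict example (hypothetical values):
#   merge_dict({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})
#   -> {'a': {'x': 1, 'y': 3}, 'b': 2}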
def merge_dict(dict1: dict, dict2: dict) -> dict:
    """Recursively merge dictionaries: dict2 on to dict1. This differs
    from dict.update() in that values that are dicts are recursively merged.
    Note that only dict value types are merged, not lists, etc.

    :param dict dict1: dictionary to merge to
    :param dict dict2: dictionary to merge with
    :rtype: dict
    :return: merged dictionary
    """
    if not isinstance(dict1, dict) or not isinstance(dict2, dict):
        raise ValueError('dict1 or dict2 is not a dictionary')
    result = copy.deepcopy(dict1)
    for k, v in dict2.items():
        if k in result and isinstance(result[k], dict):
            result[k] = merge_dict(result[k], v)
        else:
            result[k] = copy.deepcopy(v)
    return result


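# NOTE: main() loads credentials.json first, then layers config.json and
# pool.json on top via merge_dict, so later files override overlapping keys.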
def main():
    """Main function"""
    # get command-line args
    args = parseargs()

    if args.configdir is not None:
        if args.credentials is None:
            args.credentials = str(
                pathlib.Path(args.configdir, 'credentials.json'))
        if args.config is None:
            args.config = str(pathlib.Path(args.configdir, 'config.json'))
        if args.pool is None:
            args.pool = str(pathlib.Path(args.configdir, 'pool.json'))

    if args.credentials is None:
        raise ValueError('credentials json not specified')
    if args.config is None:
        raise ValueError('config json not specified')
    if args.pool is None:
        raise ValueError('pool json not specified')

    with open(args.credentials, 'r') as f:
        config = json.load(f)
    with open(args.config, 'r') as f:
        config = merge_dict(config, json.load(f))
    with open(args.pool, 'r') as f:
        config = merge_dict(config, json.load(f))

    # create storage credentials
    table_client = _create_credentials(config)
    # graph data
    data, sizes, offer, sku = coalesce_data(table_client)
    graph_data(data, sizes, offer, sku)


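# NOTE: either point --configdir at a directory containing credentials.json,
# config.json and pool.json, or pass --credentials/--config/--pool paths
# individually (a hypothetical invocation: <this script> --configdir <dir>).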
def parseargs():
    """Parse program arguments
    :rtype: argparse.Namespace
    :return: parsed arguments
    """
    parser = argparse.ArgumentParser(
        description='Batch Shipyard perf graph generator')
    parser.add_argument(
        '--configdir', help='json config dir')
    parser.add_argument(
        '--credentials', help='credentials json config')
    parser.add_argument(
        '--config', help='general json config file')
    parser.add_argument(
        '--pool', help='pool json config')
    return parser.parse_args()


if __name__ == '__main__':
    main()