2012-04-18 03:21:41 +04:00
|
|
|
# This Source Code Form is subject to the terms of the Mozilla Public License,
|
|
|
|
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
|
|
|
|
# obtain one at http://mozilla.org/MPL/2.0/.
|
|
|
|
|
2012-05-01 22:42:35 +04:00
|
|
|
import argparse
|
2012-04-18 03:39:13 +04:00
|
|
|
import ConfigParser
|
2012-06-20 01:38:32 +04:00
|
|
|
import copy
|
2013-01-30 02:11:00 +04:00
|
|
|
import fcntl
|
2012-04-13 03:50:29 +04:00
|
|
|
import inspect
|
2013-01-31 03:11:18 +04:00
|
|
|
import json
|
2012-11-01 02:08:52 +04:00
|
|
|
import logging
|
2012-05-02 00:29:07 +04:00
|
|
|
import os
|
2012-04-13 03:50:29 +04:00
|
|
|
import platform
|
2013-01-30 02:11:00 +04:00
|
|
|
import resource
|
|
|
|
import signal
|
2012-05-01 22:42:35 +04:00
|
|
|
import subprocess
|
2012-04-03 03:52:35 +04:00
|
|
|
import sys
|
2012-04-13 03:50:29 +04:00
|
|
|
import traceback
|
2012-04-03 03:52:35 +04:00
|
|
|
|
2013-01-05 02:49:21 +04:00
|
|
|
import pika
|
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-15 23:43:03 +04:00
|
|
|
# Names of netconfigs and operating systems
# Valid network configurations a test run may target.
NETCONFIGS = ('broadband', 'umts', 'gsm')

# Operating systems test clients may run on.
OPERATING_SYSTEMS = ('linux', 'mac', 'windows')
|
2012-05-08 03:32:11 +04:00
|
|
|
|
2012-05-01 22:42:35 +04:00
|
|
|
|
2013-01-05 03:52:03 +04:00
|
|
|
# RabbitMQ queue names
# Top-level queues: requests entering the system and results leaving it.
INCOMING_QUEUE = 'sr_incoming'
OUTGOING_QUEUE = 'sr_outgoing'

# Per-netconfig queues: an 'incoming' queue for asynchronous messages and an
# 'rpc' queue for synchronous calls (see RpcCaller/RpcHandler below).
NETCONFIG_QUEUES = {
    'broadband': {'incoming': 'sr_nc_broadband', 'rpc': 'sr_nc_broadband_rpc'},
    'umts': {'incoming': 'sr_nc_umts', 'rpc': 'sr_nc_umts_rpc'},
    'gsm': {'incoming': 'sr_nc_gsm', 'rpc': 'sr_nc_gsm_rpc'}
}

# One queue per client operating system.
CLIENT_QUEUES = {
    'linux': 'sr_ct_linux',
    'mac': 'sr_ct_mac',
    'windows': 'sr_ct_windows'
}
|
2013-01-05 03:52:03 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2012-11-01 02:08:52 +04:00
|
|
|
# Logging configuration
LOG_FMT = '%(asctime)s %(pathname)s:%(lineno)d %(levelname)s: %(message)s'

# Peek at the command line for a --log option at import time, ignoring any
# other arguments (add_help=False so -h is left for the real parser).
_parser = argparse.ArgumentParser(add_help=False)
_parser.add_argument('--log')
_args, _ = _parser.parse_known_args()
if _args.log:
    # Wire a DEBUG-level file handler onto the root logger so every module
    # using the logging module writes to the requested file.
    _logger = logging.getLogger()
    _logger.setLevel(logging.DEBUG)
    _handler = logging.FileHandler(_args.log)
    _formatter = logging.Formatter(fmt=LOG_FMT)
    _handler.setFormatter(_formatter)
    _logger.addHandler(_handler)
|
|
|
|
|
|
|
|
def log(msg):
    """Log *msg* at debug level, but only when --log was given."""
    if not _args.log:
        return
    logging.debug(msg)
|
|
|
|
|
|
|
|
def log_exc(msg):
    """Log *msg* plus the active exception, but only when --log was given."""
    if not _args.log:
        return
    logging.exception(msg)
|
|
|
|
|
2012-04-13 03:50:29 +04:00
|
|
|
def main(_main):
    """Mark a function as the main function to run when run as a script.

    When the calling module is __main__, _main is invoked immediately; an
    exception from it is logged, its traceback printed to stderr, and the
    process exits non-zero. Otherwise _main is returned untouched, so this
    works as a decorator.
    """
    caller_frame = inspect.stack()[1][0]
    caller_name = caller_frame.f_locals.get('__name__', None)

    if caller_name == '__main__':
        log('%s' % (' '.join(sys.argv),))
        try:
            _main()
        except Exception as e:
            log_exc('EXCEPTION')
            traceback.print_exception(type(e), e, sys.exc_info()[2], None,
                                      sys.stderr)
            sys.exit(1)
        log('FINISHED')
        sys.exit(0)

    return _main
|
2012-04-17 23:00:27 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
|
|
|
# Lazily-created ConfigParser shared by the get_config* helpers.
_cp = None
# Path of the main stoneridge config file (set by ArgumentParser.parse_args).
_srconf = None
# Path of the run-specific config file (set by TestRunArgumentParser).
_runconf = None
|
|
|
|
|
|
|
|
|
2013-02-01 00:54:36 +04:00
|
|
|
def get_config_file():
    """Return the path of the main stoneridge config file.

    This is None until ArgumentParser.parse_args has been called.
    """
    return _srconf
|
|
|
|
|
|
|
|
|
2012-07-24 21:17:21 +04:00
|
|
|
def get_config(section, option, default=None):
    """Read a config entry from the stoneridge ini files.

    Returns *default* when the section or option is missing.
    """
    global _cp

    logging.debug('reading %s.%s (default %s)' % (section, option, default))

    if _cp is None:
        # First use: build the parser and load whichever config files were
        # set on the command line. The run config is read second, so its
        # values take precedence over the main config.
        _cp = ConfigParser.SafeConfigParser()

        if _srconf:
            logging.debug('loading stoneridge config file %s' % (_srconf,))
            _cp.read(_srconf)

        if _runconf:
            logging.debug('loading run config file %s' % (_runconf,))
            _cp.read(_runconf)

    try:
        value = _cp.get(section, option)
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        logging.debug('unable to find %s.%s, returning default %s' %
                      (section, option, default))
        return default

    logging.debug('found %s.%s, returning %s' % (section, option, value))
    return value
|
2012-05-10 07:51:09 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-16 00:33:02 +04:00
|
|
|
def get_config_int(section, option, default=0):
    """Get an integer config variable from the stoneridge ini files.

    Returns *default* when the entry is missing or not a valid integer.
    """
    raw = get_config(section, option, default=default)
    try:
        return int(raw)
    except ValueError:
        logging.debug('invalid int value %s, returning default %s' %
                      (raw, default))
        return default
|
|
|
|
|
|
|
|
|
2013-02-01 23:20:57 +04:00
|
|
|
def get_config_bool(section, option):
    """Get a boolean config variable from the stoneridge ini files.

    A missing entry is False; otherwise the entry is True exactly when it
    spells one of the common "truthy" strings (case-insensitively).
    """
    raw = get_config(section, option)
    if raw is None:
        return False
    return raw.lower() in ('y', 'yes', 't', 'true', 'ok', '1')
|
|
|
|
|
|
|
|
|
2013-02-02 03:50:51 +04:00
|
|
|
# Cached full path to the xpcshell binary (resolved on first run_xpcshell).
_xpcshell = None
# Cached environment for xpcshell, with LD_LIBRARY_PATH adjusted.
_xpcshell_environ = None
|
|
|
|
|
|
|
|
|
2012-04-24 00:04:50 +04:00
|
|
|
def run_xpcshell(args, stdout=subprocess.PIPE):
    """Run xpcshell with the appropriate args.

    Returns a (exit_status, stdout_handle) tuple, or (None, []) when the
    binary directory is unconfigured or does not exist on disk.
    """
    global _xpcshell
    global _xpcshell_environ

    bindir = get_config('run', 'bin')
    if bindir is None or not os.path.exists(bindir):
        return (None, [])

    if _xpcshell_environ is None:
        # Cache a copy of our environment with bindir prepended to
        # LD_LIBRARY_PATH so xpcshell can find its shared libraries.
        _xpcshell_environ = copy.copy(os.environ)
        existing = _xpcshell_environ.get('LD_LIBRARY_PATH')
        if existing:
            _xpcshell_environ['LD_LIBRARY_PATH'] = os.path.pathsep.join(
                [bindir, existing])
        else:
            _xpcshell_environ['LD_LIBRARY_PATH'] = bindir

    if _xpcshell is None:
        # Cache the full path of the xpcshell binary.
        _xpcshell = os.path.join(bindir, get_config('machine', 'xpcshell'))

    xpcargs = [_xpcshell] + args
    logging.debug('Running xpcshell: %s' % (xpcargs,))
    proc = subprocess.Popen(xpcargs, stdout=stdout,
                            stderr=subprocess.STDOUT, cwd=bindir,
                            env=_xpcshell_environ)
    return (proc.wait(), proc.stdout)
|
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
|
|
|
# Cached xpcshell output directory (computed lazily).
_xpcoutdir = None
|
|
|
|
|
|
|
|
|
|
|
|
def get_xpcshell_output_directory():
    """Get the directory where xpcshell output will be placed.

    The directory is <xpcshell temp dir>/<run.xpcoutleaf>. The temp dir is
    discovered by asking xpcshell itself for its TmpD. Returns None when the
    config entry is missing or the temp dir cannot be determined; otherwise
    the result is cached for subsequent calls.
    """
    global _xpcoutdir

    if _xpcoutdir is None:
        xpcoutleaf = get_config('run', 'xpcoutleaf')
        if xpcoutleaf is None:
            return None

        xpcshell_tmp_dir = None
        # Ask xpcshell to print its notion of the temporary directory.
        _, stdout = run_xpcshell(['-e',
            'dump("SR-TMP-DIR:" + '
            ' Components.classes["@mozilla.org/file/directory_service;1"]'
            ' .getService(Components.interfaces.nsIProperties)'
            ' .get("TmpD", Components.interfaces.nsILocalFile)'
            ' .path + "\\n");'
            'quit(0);'])

        for line in stdout:
            if line.startswith('SR-TMP-DIR:'):
                xpcshell_tmp_dir = line.strip().split(':', 1)[1]

        if xpcshell_tmp_dir is None:
            # TODO - maybe raise exception?
            return None

        # BUG FIX: this previously read "os.path.join(xpctmp, xpcoutleaf)",
        # but no name "xpctmp" exists anywhere, so reaching this line raised
        # NameError. Use the directory parsed above.
        _xpcoutdir = os.path.join(xpcshell_tmp_dir, xpcoutleaf)

    return _xpcoutdir
|
|
|
|
|
|
|
|
|
|
|
|
# Cached OS version string (computed lazily by get_os_version).
_os_version = None
|
|
|
|
|
|
|
|
|
|
|
|
def get_os_version():
    """Return the OS version in use.

    The value is derived from the 'machine.os' config entry via the
    platform module, cached after the first call, and 'Unknown' for an
    unrecognized OS name.
    """
    global _os_version

    if _os_version is None:
        os_name = get_config('machine', 'os')
        # Dispatch table of version resolvers; lambdas keep the platform
        # calls lazy so only the relevant one runs.
        version_of = {
            'linux': lambda: ' '.join(platform.linux_distribution()[0:2]),
            'mac': lambda: platform.mac_ver()[0],
            'windows': lambda: platform.win32_ver()[1],
        }
        if os_name in version_of:
            _os_version = version_of[os_name]()
        else:
            _os_version = 'Unknown'

    return _os_version
|
|
|
|
|
|
|
|
|
|
|
|
# Netconfig name -> single-character id used in the build id suffix.
_netconfig_ids = {
    'broadband':'0',
    'umts':'1',
    'gsm':'2',
}


# OS name -> single-character id used in the build id suffix.
_os_ids = {
    'windows':'w',
    'linux':'l',
    'mac':'m',
}
|
|
|
|
|
|
|
|
|
|
|
|
# Cached build id suffix (computed lazily by get_buildid_suffix).
_buildid_suffix = None
|
|
|
|
|
|
|
|
|
|
|
|
def get_buildid_suffix():
    """Return the suffix to be used to uniquify the build id.

    The suffix combines the OS id and the netconfig id ('' when either
    config entry is missing); the computed value is cached.
    """
    global _buildid_suffix

    if _buildid_suffix is not None:
        return _buildid_suffix

    os_name = get_config('machine', 'os')
    current_netconfig = get_config('run', 'netconfig')
    if os_name is None or current_netconfig is None:
        return ''

    _buildid_suffix = _os_ids[os_name] + _netconfig_ids[current_netconfig]
    return _buildid_suffix
|
|
|
|
|
2012-05-04 02:25:10 +04:00
|
|
|
|
2013-01-31 03:11:39 +04:00
|
|
|
# Cached stoneridge root directory (read from config on first run_process).
_root = None
|
|
|
|
|
|
|
|
|
|
|
|
def run_process(procname, *args, **kwargs):
    """Run a python process under the stoneridge environment.

    procname - script name, resolved relative to the stoneridge root
    args - extra arguments; each is stringified before being passed on
    kwargs - may contain 'logger' (defaults to the logging module)

    Raises subprocess.CalledProcessError when the child exits non-zero,
    after logging its output.
    """
    global _root

    if _root is None:
        _root = get_config('stoneridge', 'root')

    logger = kwargs.get('logger', logging)
    command = [sys.executable, os.path.join(_root, procname)]
    # Build the argument list explicitly rather than "+ map(str, args)":
    # identical behavior on python 2, and still a list on python 3 (where
    # map() returns an iterator and list + map would raise TypeError).
    command.extend(str(a) for a in args)
    logger.debug('Running %s' % (procname,))
    logger.debug(' '.join(command))
    try:
        proc_stdout = subprocess.check_output(command,
                                              stderr=subprocess.STDOUT)
        logger.debug(proc_stdout)
        logger.debug('SUCCEEDED: %s' % (procname,))
    except subprocess.CalledProcessError as e:
        logger.error('FAILED: %s (%s)' % (procname, e.returncode))
        logger.error(e.output)
        raise  # Do this in case caller has any special handling
|
|
|
|
|
2012-09-25 22:27:22 +04:00
|
|
|
|
2012-04-17 23:00:27 +04:00
|
|
|
class ArgumentParser(argparse.ArgumentParser):
    """An argument parser for stone ridge programs that handles the arguments
    required by all of them (--config and --log).
    """
    def __init__(self, **kwargs):
        super(ArgumentParser, self).__init__(**kwargs)
        self.add_argument('--config', dest='_sr_config_', required=True,
                          help='Configuration file')
        self.add_argument('--log', dest='_sr_log_', default=None,
                          required=True,
                          help='File to place log info in')

    def parse_args(self, **kwargs):
        """Parse arguments and record the config file path module-wide."""
        global _srconf

        namespace = super(ArgumentParser, self).parse_args(**kwargs)

        _srconf = namespace._sr_config_
        logging.debug('_srconf: %s' % (_srconf,))
        logging.debug('_srlog: %s' % (namespace._sr_log_,))

        return namespace
|
|
|
|
|
2012-05-08 03:32:11 +04:00
|
|
|
|
2013-01-30 02:11:00 +04:00
|
|
|
def daemon_sig(pidfile):
    """Signal handler for daemons created with stoneridge.daemonize.

    Removes *pidfile* and terminates the process with exit status 0.
    """
    logging.debug('signal handler: unlinking pidfile')
    os.unlink(pidfile)
    logging.debug('signal handler: daemon exiting')
    raise SystemExit(0)
|
|
|
|
|
|
|
|
|
|
|
|
def daemonize(pidfile, function, **kwargs):
    """Run a function as a daemon.

    pidfile - Name of file to write PID to
    function - Function object to call as the daemon
    kwargs - Arguments to pass to <function>

    Standard double-fork-style detachment is approximated with one fork plus
    setsid; all fds except the log file's are closed, std streams point at
    /dev/null, and the pid is written under an exclusive lock. Does not
    return: either the parent or the daemon exits via sys.exit.
    """
    logging.debug('forking for daemonization')
    pid = os.fork()

    if pid < 0:
        # Fork failure
        logging.error('fork failed (%s)' % (os.strerror(pid,)))
        sys.exit(1)

    if pid:
        # Parent
        sys.exit(0)

    # Child: become a session leader so we detach from the controlling tty.
    sid = os.setsid()
    if sid == -1:
        # Error setting session ID
        logging.error('error setting sid')
        sys.exit(1)

    devnull = getattr(os, 'devnull', '/dev/null')
    logging.debug('devnull = %s' % (devnull,))

    # Collect the fds belonging to our log file handlers so the close loop
    # below doesn't cut off logging.
    log_fds = set()
    logger = logging.getLogger()
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_fds.add(handler.stream.fileno())
    logging.debug('log fds = %s' % (log_fds,))

    # Close every fd we may have inherited, except the log file's.
    for fd in range(resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
        if fd in log_fds:
            logging.debug('not closing fd %s (log)' % (fd,))
        else:
            try:
                os.close(fd)
                logging.debug('closed fd %s' % (fd,))
            except OSError:
                # Didn't have it open, don't care
                pass

    # Make stdin, stdout & stderr point to /dev/null
    # (fd 0 is the lowest free fd after the close loop; dup fills 1 and 2).
    logging.debug('pointing std{in,out,err} -> devnull')
    os.open(devnull, os.O_RDWR)
    os.dup(0)
    os.dup(0)

    # Set a sane umask
    logging.debug('setting umask 027')
    os.umask(027)

    # Change to the usual daemon directory
    logging.debug('chdir -> /')
    os.chdir('/')

    # NOTE(review): the pidfile appears to stay open (and lockf-locked) for
    # the daemon's lifetime, providing single-instance exclusion — confirm
    # the original indentation kept the lines below inside this with block.
    with file(pidfile, 'w') as f:
        logging.debug('locking %s' % (pidfile,))
        fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)

        logging.debug('writing pid')
        f.write('%s' % (os.getpid(),))
        f.flush()

        logging.debug('setting up sigterm handler')
        signal.signal(signal.SIGTERM, lambda sig, frame: daemon_sig(pidfile))

        logging.debug('calling daemon function')
        function(**kwargs)

        # If we get here, we assume the program is exiting cleanly
        logging.debug('unlinking pidfile')
        os.unlink(pidfile)
        logging.debug('daemon exiting')
        sys.exit(0)
|
|
|
|
|
|
|
|
|
2013-01-11 23:36:55 +04:00
|
|
|
class DaemonArgumentParser(ArgumentParser):
    """An argument parser for stone ridge programs that run as daemons.

    Adds --nodaemon/--pidfile handling on top of the common arguments.
    """
    def __init__(self, **kwargs):
        ArgumentParser.__init__(self, **kwargs)
        self.add_argument('--nodaemon', dest='nodaemon', action='store_true')
        self.add_argument('--pidfile', dest='pidfile')

    def do_exit(self, msg):
        """Print usage and bail out with status 2, like argparse errors."""
        self.print_usage()
        self.exit(2, msg % (self.prog,))

    def do_mutex_exit(self, arg):
        """Exit because *arg* conflicts with --nodaemon."""
        template = '%%s: error: argument %s: not allowed with argument --nodaemon\n'
        self.do_exit(template % (arg,))

    def do_missing_exit(self, arg):
        """Exit because required argument *arg* was not supplied."""
        template = '%%s: error: argument %s is required\n'
        self.do_exit(template % (arg,))

    def parse_args(self, **kwargs):
        self.args = ArgumentParser.parse_args(self, **kwargs)

        # --pidfile is mandatory for daemon mode, forbidden with --nodaemon.
        if self.args.nodaemon and self.args.pidfile:
            self.do_mutex_exit('--pidfile')
        if not self.args.nodaemon and not self.args.pidfile:
            self.do_missing_exit('--pidfile')

        return self.args

    def start_daemon(self, daemon_func, **kwargs):
        """Run daemon_func, daemonized unless --nodaemon was given."""
        if self.args.nodaemon:
            logging.debug('not running daemonized')
            daemon_func(**kwargs)
            sys.exit(0)

        logging.debug('starting daemon')
        daemonize(self.args.pidfile, daemon_func, **kwargs)
|
2013-01-11 23:36:55 +04:00
|
|
|
|
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
class TestRunArgumentParser(ArgumentParser):
    """Like stoneridge.ArgumentParser, but adds the arguments needed by
    programs that run as part of a test run.
    """
    def __init__(self, **kwargs):
        ArgumentParser.__init__(self, **kwargs)
        self.add_argument('--runconfig', dest='_sr_runconfig_', required=True,
                          help='Run-specific configuration file')

    def parse_args(self, **kwargs):
        """Parse arguments and record the run config path module-wide."""
        global _runconf

        namespace = ArgumentParser.parse_args(self, **kwargs)

        _runconf = namespace._sr_runconfig_
        logging.debug('_runconf: %s' % (_runconf,))

        return namespace
|
2013-01-05 02:49:21 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-05 02:49:21 +04:00
|
|
|
class QueueListener(object):
    """Base class for stone ridge daemons that need to respond to entries
    on a rabbitmq queue.
    """
    def __init__(self, queue, **kwargs):
        self._queue = queue
        self._args = kwargs
        self._host = get_config('stoneridge', 'mqhost')
        self._params = pika.ConnectionParameters(host=self._host)
        self.setup(**kwargs)

    def setup(self, **kwargs):
        """Subclass hook for initialization that would normally live in
        __init__.
        """
        pass

    def handle(self, **kwargs):
        """Callback invoked for each message received on the queue.

        All subclasses must override this; the return value is ignored.
        """
        raise NotImplementedError

    def _handle(self, channel, method, properties, body):
        """Deserialize an incoming message, dispatch it to handle, and ack
        it once handle returns without raising.
        """
        self.handle(**json.loads(body))
        channel.basic_ack(delivery_tag=method.delivery_tag)

    def run(self):
        """Main event loop for a queue listener; consumes forever."""
        logging.debug('Running queue listener for %s' % (self._queue,))
        if self._queue is None:
            raise Exception('You must set queue for %s' % (type(self),))

        connection = pika.BlockingConnection(self._params)
        channel = connection.channel()

        # One unacked message at a time, so work is spread across listeners.
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(self._handle, queue=self._queue)

        channel.start_consuming()
|
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-05 02:49:21 +04:00
|
|
|
class QueueWriter(object):
    """Used when someone needs to write to a stone ridge queue.
    """
    def __init__(self, queue):
        self._queue = queue
        self._host = get_config('stoneridge', 'mqhost')
        self._params = pika.ConnectionParameters(host=self._host)

    def enqueue(self, **msg):
        """Serialize *msg* as a JSON string and publish it (durably) on the
        queue.
        """
        connection = pika.BlockingConnection(self._params)
        channel = connection.channel()

        channel.basic_publish(exchange='', routing_key=self._queue,
                              body=json.dumps(msg),
                              properties=pika.BasicProperties(delivery_mode=2))  # Durable

        connection.close()  # Ensures the message is sent
|
2013-01-10 02:00:12 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-15 22:19:46 +04:00
|
|
|
def enqueue(nightly=True, ldap='', sha='', netconfigs=None,
            operating_systems=None, srid=None, attempt=1):
    """Convenience function to kick off a test run. If called with no arguments,
    this will kick off a run for all operating systems with all netconfigs
    against the latest nightly build.

    Raises ValueError for an unknown netconfig or operating system, or when
    the nightly flag conflicts with the ldap/sha arguments.
    """
    if not netconfigs:
        netconfigs = _netconfig_ids.keys()
    else:
        for nc in netconfigs:
            if nc not in _netconfig_ids:
                raise ValueError('Invalid net config %s' % (nc,))

    if not operating_systems:
        operating_systems = _os_ids.keys()
    else:
        for ops in operating_systems:
            if ops not in _os_ids:
                # BUG FIX: this message previously interpolated "nc" (the
                # loop variable of the netconfig check above), which could
                # be unbound here or name the wrong value; report the
                # offending operating system instead.
                raise ValueError('Invalid operating system %s' % (ops,))

    if nightly:
        if ldap or sha:
            raise ValueError('ldap and sha are not compatible with nightly')
    else:
        if not ldap or not sha:
            raise ValueError('both ldap and sha must be set')

    writer = QueueWriter(INCOMING_QUEUE)
    writer.enqueue(nightly=nightly, ldap=ldap, sha=sha, netconfigs=netconfigs,
                   operating_systems=operating_systems, srid=srid,
                   attempt=attempt)
|
2013-01-15 22:19:46 +04:00
|
|
|
|
|
|
|
|
2013-01-10 02:00:12 +04:00
|
|
|
class RpcCaller(object):
    """Used to call remote functions via the stone ridge mq of choice.

    Publishes a message on the outgoing queue with a reply-to of the
    incoming queue, then blocks until a reply with a matching correlation
    id (the run's srid) arrives.
    """
    def __init__(self, outgoing_queue, incoming_queue):
        self._host = get_config('stoneridge', 'mqhost')
        self._outgoing_queue = outgoing_queue
        self._incoming_queue = incoming_queue

        params = pika.ConnectionParameters(host=self._host)
        self._connection = pika.BlockingConnection(params)
        self._channel = self._connection.channel()
        # Replies need no ack; they are matched by correlation id instead.
        self._channel.basic_consume(self._on_rpc_done, no_ack=True,
                                    queue=self._incoming_queue)

    def _on_rpc_done(self, channel, method, properties, body):
        """The callback that is called when the remote function call
        is complete.

        Stores the raw reply body only when its correlation id matches the
        srid of the in-flight call; anything else is ignored.
        """
        logging.debug('RpcCaller got callback')
        logging.debug('Body: %s' % (body,))
        logging.debug('Correlation id: %s' % (properties.correlation_id,))
        if self._srid == properties.correlation_id:
            logging.debug('Correlation ID matches.')
            self._response = body
        else:
            logging.debug('No match for correlation ID. Ignoring.')

    def __call__(self, **msg):
        # The srid doubles as the correlation id, so it is mandatory.
        if 'srid' not in msg:
            logging.error('Attempted to make an RPC call without an srid!')
            return None

        self._response = None
        self._srid = msg['srid']

        logging.debug('Making RPC call with correlation id %s' % (self._srid,))
        logging.debug('Sending to: %s' % (self._outgoing_queue,))
        logging.debug('Reply to: %s' % (self._incoming_queue,))

        properties = pika.BasicProperties(reply_to=self._incoming_queue,
                                          correlation_id=self._srid)
        body = json.dumps(msg)
        self._channel.basic_publish(exchange='',
                                    routing_key=self._outgoing_queue, body=body,
                                    properties=properties)

        # Pump the connection until _on_rpc_done sees our reply.
        while self._response is None:
            self._connection.process_data_events()

        self._srid = None

        return json.loads(self._response)
|
2013-01-10 02:48:20 +04:00
|
|
|
|
2013-01-11 01:52:50 +04:00
|
|
|
|
2013-01-10 02:48:20 +04:00
|
|
|
class RpcHandler(QueueListener):
    """Like stoneridge.QueueListener, but for programs that service RPC
    instead of asynchronous queue events.
    """
    def handle(self, **kwargs):
        """Just like stoneridge.QueueListener.handle, except the return
        value from this must be serializable as a JSON string.
        """
        raise NotImplementedError

    def _handle(self, channel, method, properties, body):
        """Perform the RPC, publish the JSON-encoded result back on the
        caller's reply queue, then ack the request.
        """
        msg = json.loads(body)
        logging.debug('RPC Handler got message %s' % (msg,))
        result = self.handle(**msg)

        reply_body = json.dumps(result)
        logging.debug('Returning RPC result %s' % (result,))
        logging.debug('Returning to %s' % (properties.correlation_id,))
        reply_properties = pika.BasicProperties(
            correlation_id=properties.correlation_id)
        channel.basic_publish(exchange='', routing_key=properties.reply_to,
                              properties=reply_properties, body=reply_body)

        channel.basic_ack(delivery_tag=method.delivery_tag)
|