Allow AWFY to build on the awfy machine, and run elsewhere

This commit is contained in:
Marty Rosenberg 2014-02-28 09:06:27 -05:00
Parent 641a3b40b9
Commit 192a11d934
7 changed files: 260 additions and 60 deletions

9
benchmarks/asmjs-apps/bullet/configure vendored
Просмотреть файл

@ -15042,6 +15042,15 @@ esac
case "$host" in
arm*)
cat >>confdefs.h <<\_ACEOF
#define ARCH_ARM 1
_ACEOF
ARCH_SPECIFIC_CFLAGS=""
ARCH_STRING="ARM"
;;
i?86-* | k?-* | athlon-* | pentium*-)
cat >>confdefs.h <<\_ACEOF

Просмотреть файл

@ -9,10 +9,16 @@ import sys
import urllib2
import StringIO
import subprocess
import signal
import pickle
import remote
import ConfigParser
import submitter
import utils
from collections import namedtuple
Mode = namedtuple('Mode', ['shell', 'args', 'env', 'name', 'cset'])
class Benchmark(object):
def __init__(self, name, folder):
self.name = name
@ -34,8 +40,9 @@ class Benchmark(object):
tests = None
print('Running ' + self.name + ' under ' + mode.shell + ' ' + ' '.join(mode.args))
tests = self.benchmark(mode.shell, mode.env, mode.args)
except:
except Exception as e:
print('Failed to run ' + self.name + '!')
print("Exception: " + repr(e))
pass
if tests:
submit.AddTests(tests, self.name, mode.name)
@ -51,15 +58,12 @@ class AsmJS(Benchmark):
def _run(self, submit, native, modes):
# Run the C++ mode.
full_args = ['python', 'harness.py', '--native']
full_args = ['python2.7', 'harness.py', '--native']
full_args += ['--cc="' + native.cc + '"']
full_args += ['--cxx="' + native.cxx + '"']
full_args += ['--'] + native.args
print(' '.join(full_args))
p = subprocess.Popen(full_args, stdout=subprocess.PIPE, env=os.environ)
output = p.communicate()[0]
print(output)
output = utils.RunTimedCheckOutput(full_args)
tests = self.parse(output)
submit.AddTests(tests, self.name, native.mode)
@ -67,12 +71,10 @@ class AsmJS(Benchmark):
super(AsmJS, self)._run(submit, native, modes)
def benchmark(self, shell, env, args):
full_args = ['python', 'harness.py', shell, '--'] + args
full_args = ['python2.7', 'harness.py', shell, '--'] + args
print(' '.join(full_args))
p = subprocess.Popen(full_args, stdout=subprocess.PIPE, env=env)
output = p.communicate()[0]
print(output)
output = utils.RunTimedCheckOutput(full_args, env=env)
return self.parse(output)
def parse(self, output):
@ -108,8 +110,7 @@ class Octane(Benchmark):
full_args.append('run.js')
print(os.getcwd())
p = subprocess.Popen(full_args, stdout=subprocess.PIPE, env=env)
output = p.communicate()[0]
output = utils.RunTimedCheckOutput(full_args, env=env)
tests = []
lines = output.splitlines()
@ -138,14 +139,11 @@ class SunSpider(Benchmark):
else:
args = ''
p = subprocess.Popen(["./sunspider",
"--shell=" + shell,
"--runs=" + str(self.runs),
args],
stdout=subprocess.PIPE,
env=env)
output = p.communicate()[0]
output = utils.RunTimedCheckOutput(["./sunspider",
"--shell=" + shell,
"--runs=" + str(self.runs),
args],
env=env)
tests = []
lines = output.splitlines()
@ -170,10 +168,26 @@ class SunSpider(Benchmark):
return tests
Benchmarks = [AsmJSApps(),
Benchmarks = [ AsmJSApps(),
AsmJSMicro(),
SunSpider('ss', 'SunSpider', 20),
SunSpider('kraken', 'kraken', 5),
SunSpider('misc', 'misc', 3),
Octane(),
]
def runBenches_(submit, native, modes):
# Run through each benchmark.
print "runBenches_ believes the timeout is: " + str(utils.Timeout)
for benchmark in Benchmarks:
benchmark.run(submit, native, modes)
submit.Finish(1)
def runBenches(slave, submit, native, modes):
slave.rpc(sys.modules[__name__], submit, native, modes, async=True)
default_function = runBenches_
if __name__ == "__main__":
remote.takerpc()

Просмотреть файл

@ -120,6 +120,8 @@ class V8(Engine):
super(V8, self).__init__()
self.puller = 'svn'
self.source = utils.config.get('v8', 'source')
self.CXX = utils.config_get_default('v8', 'CXX')
self.LINK = utils.config.get('v8', 'LINK')
self.args = ['--expose-gc']
self.important = True
self.hardfp = (utils.config.has_option('main', 'flags')) and \
@ -132,16 +134,22 @@ class V8(Engine):
]
def build(self):
Run(['make', 'dependencies'])
env = os.environ.copy()
if self.CXX is not None:
env['CXX'] = self.CXX
if self.LINK is not None:
env['LINK'] = self.LINK
Run(['make', 'dependencies'], env)
if self.cpu == 'x64':
Run(['make', 'x64.release'])
Run(['make', 'x64.release'], env)
elif self.cpu == 'arm':
if self.hardfp:
Run(['make', 'arm.release', 'hardfp=on'])
Run(['make', 'arm.release', 'hardfp=on', 'i18nsupport=off'], env)
else:
Run(['make', 'arm.release'])
Run(['make', 'arm.release', 'i18nsupport=off'], env)
elif self.cpu == 'x86':
Run(['make', 'ia32.release'])
Run(['make', 'ia32.release'], env)
def shell(self):
if self.cpu == 'x64':

Просмотреть файл

@ -7,12 +7,14 @@ import os
import submitter
import builders
import sys
import signal
import resource
import utils
import time
import puller
import remote
from optparse import OptionParser
from benchmark import Benchmarks
from benchmark import runBenches
from collections import namedtuple
parser = OptionParser(usage="usage: %prog [options]")
@ -26,6 +28,12 @@ parser.add_option("-c", "--config", dest="config_name", type="string", default="
(options, args) = parser.parse_args()
utils.InitConfig(options.config_name)
remote.InitSlaves()
for slave in remote.slaves:
# make sure the slaves are synchronized with us.
slave.pushRemote(utils.DriverPath + os.path.sep, slave.DriverPath)
# uhhh... do we ever update the benchmarks?
slave.pushRemote(utils.BenchmarkPath + os.path.sep, slave.BenchmarkPath)
# Set resource limits for child processes
resource.setrlimit(resource.RLIMIT_AS, (-1, -1))
@ -54,11 +62,12 @@ for e in KnownEngines:
NumUpdated += 1
Engines.append([e, cset, updated])
submit = submitter.Submitter()
# No updates. Report to server and wait 60 seconds, before moving on
if NumUpdated == 0 and not options.force:
submit.Awake();
for slave in slaves:
submit = submitter.Submitter(slave)
submit.Awake();
time.sleep(60)
sys.exit(0)
@ -74,6 +83,9 @@ for entry in Engines:
e = entry[0]
cset = entry[1]
shell = os.path.join(utils.RepoPath, e.source, e.shell())
for slave in remote.slaves:
rshell = os.path.join(slave.RepoPath, e.source, e.shell())
slave.pushRemote(shell, rshell, follow=True)
env = None
with utils.chdir(os.path.join(utils.RepoPath, e.source)):
env = e.env()
@ -90,27 +102,14 @@ for entry in Engines:
modes.append(mode)
# Inform AWFY of each mode we found.
submit.Start()
for mode in modes:
submit.AddEngine(mode.name, mode.cset)
submit.AddEngine(native.mode, native.signature)
for slave in remote.slaves:
submit = submitter.Submitter(slave)
submit.Start()
for mode in modes:
submit.AddEngine(mode.name, mode.cset)
submit.AddEngine(native.mode, native.signature)
runBenches(slave, submit, native, modes)
# Run through each benchmark.
class TimeException(Exception):
pass
def timeout_handler(signum, frame):
raise TimeException()
for benchmark in Benchmarks:
try:
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(15*60) # trigger alarm in 15 minutes
benchmark.run(submit, native, modes)
signal.alarm(0)
except TimeException:
pass
submit.Finish(1)
# wait for all of the slaves to finish running before exiting.
for slave in remote.slaves:
slave.synchronize()

115
driver/remote.py Normal file
Просмотреть файл

@ -0,0 +1,115 @@
import sys
import utils
import re
import pickle
import os
import subprocess
import __main__
slaves = []
class Slave:
# I wanted to have the constructor pull the machine id from the config file, but that doesn't work
# because utils needs to include remote, and remote would neet to include utils.
def __init__(self, name):
self.name = name
self.machine = utils.config.get(name,'machine')
# default slaves to using the exact same locations as locally.
self.RepoPath = utils.config_get_default(name, 'repos', utils.RepoPath)
self.BenchmarkPath = utils.config_get_default(name, 'benchmarks', utils.BenchmarkPath)
self.DriverPath = utils.config_get_default(name, 'repos', utils.DriverPath)
self.Timeout = utils.config_get_default(name, 'timeout', str(utils.Timeout))
# make multiplication work!
self.Timeout = eval(self.Timeout, {}, {})
# assume that the remote python shell is the currently running shell.
self.PythonName = utils.config_get_default(name, 'python', sys.executable)
self.delayed = None
self.delayedCommand = None
def isLocal(self):
return self.name == "main"
def runRemote(self, cmds, async = False):
# sanity check
if (self.name == "main"):
raise Error("shouldn't be able to run a remote command on the local host.")
# no matter what, we don't want to start running a new command until the old one is gone.
self.synchronize()
fullcmd = ["ssh", self.name, "--"] + cmds
if async:
print ("ASYNC: " + " ".join(fullcmd))
self.delayed = subprocess.Popen(fullcmd, stderr = subprocess.STDOUT, stdout = subprocess.PIPE)
subprocess.Popen(['sed', '-e', 's/^/' + self.name + ': /'], stdin = self.delayed.stdout)
self.delayedCommand = str(fullcmd)
else:
utils.Run(["ssh", self.name, "--"] + cmds)
def pushRemote(self, file_loc, file_remote, follow = False):
if self.isLocal():
return
# if they asked us to follow symlinks, then add '-L' into the arguments.
rsync_flags = "-aP"
if follow:
rsync_flags += "L"
utils.Run(["rsync", rsync_flags, file_loc, self.name + ":" + file_remote])
def synchronize(self):
if self.delayed:
print("Waiting for: "+self.delayedCommand)
retval = self.delayed.wait()
if retval != 0:
raise Exception(self.delayedCommand + ": failed with exit code" + str(retval))
self.delayed = None
self.delayedCommand = None
def rpc(self, module, *args, **opt):
# rpc's are simple when they aren't remote
# they also ignore async.
if self.isLocal():
module.default_function(*args)
async = False
if 'async' in opt:
async = opt['async']
fd = open("state.p", "wb")
# dump the global state gathered from the config file
pickle.dump(utils.config, fd)
# dump out the per-slave path *as* the global path for the rpc
pickle.dump(self.RepoPath, fd)
pickle.dump(self.BenchmarkPath, fd)
pickle.dump(self.DriverPath, fd)
pickle.dump(self.Timeout, fd)
# dump out all the arguments
pickle.dump(args, fd)
fd.close()
# send the pickled data over the wire so we can make a call
self.pushRemote(os.path.join(utils.DriverPath, "state.p"), os.path.join(self.DriverPath, "state.p"))
# cd into the driver's directory, then start running the module.
self.runRemote(["cd", self.DriverPath, ";", self.PythonName, module.__name__ + '.py', os.path.join(self.DriverPath, "state.p")], async=async)
def takerpc(func=None, name=None):
    """Execute the RPC described by a pickled state file (slave side).

    Restores utils' global configuration (config, paths, Timeout) from the
    state file, then calls func with the unpickled arguments.

    func: callable to invoke; defaults to the running script's
          default_function.
    name: path of the state file; defaults to sys.argv[1] when present,
          else "state.p" in the current directory.  (BUG FIX: this used to
          be a def-time default of sys.argv[1], which raised IndexError at
          *import* time whenever the module was loaded without arguments.)
    """
    if name is None:
        name = sys.argv[1] if len(sys.argv) > 1 else "state.p"
    if not func:
        func = __main__.default_function
    fd = open(name, "rb")
    try:
        # Pull out the global configuration.
        utils.config = pickle.load(fd)
        utils.RepoPath = pickle.load(fd)
        utils.BenchmarkPath = pickle.load(fd)
        utils.DriverPath = pickle.load(fd)
        utils.Timeout = pickle.load(fd)
        # Pull out the pickled arguments.
        args = pickle.load(fd)
    finally:
        fd.close()
    # Call the one true function.
    func(*args)
def InitSlaves():
    """Populate the global slaves list from the 'slaves' config option.

    'slaves' is a colon-separated list of config-section names.  When the
    option is missing or empty we fall back to the magic name 'main', which
    is (a) treated as a non-remote machine and (b) pulls all per-slave
    configuration from the master configuration.
    """
    global slaves
    # BUG FIX: config.get raised NoOptionError when 'slaves' was undefined,
    # and re.split(":", "") yields [''], so the documented 'main' fallback
    # below was unreachable.  Use the defaulting getter and drop empty names.
    spec = utils.config_get_default('main', 'slaves', '')
    names = [n for n in spec.split(':') if n]
    if names:
        slaves = [Slave(name) for name in names]
    else:
        slaves = [Slave("main")]

Просмотреть файл

@ -9,9 +9,9 @@ import urllib
import urllib2
class Submitter:
def __init__(self):
def __init__(self, slave):
self.url = utils.config.get('main', 'updateURL')
self.machine = utils.config.get('main', 'machine')
self.machine = slave.machine
def Awake(self):
url = self.url

Просмотреть файл

@ -7,20 +7,27 @@ import os
import sys
import commands
import subprocess
import signal
import ConfigParser
config = None
RepoPath = None
BenchmarkPath = None
DriverPath = None
Timeout = 15*60
def InitConfig(name):
global config, RepoPath, BenchmarkPath
global config, RepoPath, BenchmarkPath, DriverPath, Timeout
config = ConfigParser.RawConfigParser()
if not os.path.isfile(name):
raise Exception('could not find file: ' + name)
config.read(name)
RepoPath = config.get('main', 'repos')
BenchmarkPath = config.get('main', 'benchmarks')
# banal assumption that we are running this from the driver directory.
DriverPath = config_get_default('main', 'driver', os.getcwd())
Timeout = config_get_default('main', 'timeout', str(Timeout))
# silly hack to allow 30*60 in the config file.
Timeout = eval(Timeout, {}, {})
class FolderChanger:
def __init__(self, folder):
@ -36,10 +43,16 @@ class FolderChanger:
def chdir(folder):
return FolderChanger(folder)
def Run(vec, env=None):
    """Run vec (an argv list) and return its decoded stdout+stderr.

    env: environment mapping for the child; defaults to a fresh copy of
    os.environ taken at call time.  (BUG FIX: the default used to be
    `env=os.environ.copy()`, which is evaluated once at def time, so later
    environment changes were never seen and the shared dict could be
    mutated across calls.)

    Raises subprocess.CalledProcessError (after logging the child's
    output) when the command exits non-zero.
    """
    if env is None:
        env = os.environ.copy()
    print(">> Executing in " + os.getcwd())
    print(' '.join(vec))
    print("with: " + str(env))
    try:
        o = subprocess.check_output(vec, stderr=subprocess.STDOUT, env=env)
    except subprocess.CalledProcessError as e:
        # Surface the child's output before propagating the failure.
        print('output was: ' + str(e.output))
        print(e)
        # Bare raise preserves the original traceback ('raise e' reset it).
        raise
    o = o.decode("utf-8")
    print(o)
    return o
@ -50,3 +63,45 @@ def Shell(string):
print(output)
return output
def config_get_default(section, name, default=None):
    """Look up option `name` in [section] of the global config.

    Returns the configured value when the option exists, otherwise the
    supplied default (None when not given).
    """
    if not config.has_option(section, name):
        return default
    return config.get(section, name)
class TimeException(Exception):
    """Raised when a watchdog alarm fires (see timeout_handler)."""
    pass

def timeout_handler(signum, frame):
    """Signal handler: translate SIGALRM into a TimeException."""
    raise TimeException()
class Handler():
    """Context manager that installs a signal handler on entry and puts
    the previously-installed handler back on exit."""

    def __init__(self, signum, lam):
        # Signal number and the handler callable to install.
        self.signum = signum
        self.lam = lam
        self.old = None

    def __enter__(self):
        # signal.signal returns the handler we are displacing; keep it.
        self.old = signal.signal(self.signum, self.lam)

    def __exit__(self, exc_type, exc_value, tb):
        # Restore whatever was installed before we entered.
        signal.signal(self.signum, self.old)
def RunTimedCheckOutput(args, env=None, timeout=None, **popenargs):
    """Run args under a SIGALRM watchdog and return its stdout (bytes).

    env: child environment; defaults to a fresh copy of os.environ taken
         at call time.  (BUG FIX: previously a def-time mutable default
         shared across calls.)
    timeout: seconds before the child is killed; defaults to the module
             global Timeout.
    popenargs: extra keyword arguments forwarded to subprocess.Popen.

    On timeout the child is killed and any partial output is returned; a
    non-zero exit status is only logged, never raised.
    """
    if env is None:
        env = os.environ.copy()
    if timeout is None:
        timeout = Timeout
    print('Running: "' + '" "'.join(args) + '" with timeout: ' + str(timeout))
    p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, **popenargs)
    with Handler(signal.SIGALRM, timeout_handler):
        try:
            signal.alarm(timeout)
            output = p.communicate()[0]
            # If we get an alarm right here, nothing too bad should happen.
            signal.alarm(0)
            if p.returncode:
                # BUG FIX: message read "returnedN" -- space was missing.
                print("ERROR: returned " + str(p.returncode))
        except TimeException:
            # Make sure it is no longer running.
            p.kill()
            # In case someone looks at the logs...
            print("WARNING: Timed Out")
            # Try to get any partial output.
            output = p.communicate()[0]
    print(output)
    return output