Create tools/tests/run_all.py to run all our Python self-tests within tools

Eventually, we want all of these self-tests to be written in Python;
this change makes it easier for us to add new Python self-tests, and discourages
the addition of bash self-tests.

BUG=skia:677,skia:1943
NOTRY=True
R=rmistry@google.com, scroggo@google.com

Author: epoger@google.com

Review URL: https://codereview.chromium.org/112163004

git-svn-id: http://skia.googlecode.com/svn/trunk@12775 2bbb7eff-a529-9590-31e7-b0007b416f81
This commit is contained in:
commit-bot@chromium.org 2013-12-19 18:22:32 +00:00
Родитель 6d7296aaa0
Коммит 9e8f88b6ef
3 изменённых файлов: 50 добавлений и 32 удалений

Просмотреть файл

@ -216,19 +216,6 @@ download_bench_rawdata $PLATFORM $REVISION "$BENCHDATA_FILE_SUFFIXES_NO_INDIVIDU
download_bench_rawdata $PLATFORM $REVISION "$BENCHDATA_FILE_SUFFIXES_YES_INDIVIDUAL_TILES"
benchalert_test $PLATFORM $REVISION
#
# Run self test for skimage ...
#
COMMAND="python tools/tests/skimage_self_test.py"
echo "$COMMAND"
$COMMAND
ret=$?
if [ $ret -ne 0 ]; then
echo "skimage self tests failed."
exit 1
fi
#
# Test rebaseline.py ...
#
@ -248,4 +235,23 @@ JSONDIFF_OUTPUT=tools/tests/jsondiff/output
jsondiff_test "$JSONDIFF_INPUT/old.json $JSONDIFF_INPUT/new.json" "$JSONDIFF_OUTPUT/old-vs-new"
#
# Launch all the self-tests which have been written in Python.
#
# TODO: Over time, we should move all of our tests into Python, and delete
# the bash tests above.
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')
#
# Launch the Python self-test harness; abort the whole run on any failure.
COMMAND="python tools/tests/run_all.py"
echo "$COMMAND"
if ! $COMMAND; then
  echo "failure in Python self-tests; see stack trace above"
  exit 1
fi
echo "All tests passed."

20
tools/tests/run_all.py Executable file
Просмотреть файл

@ -0,0 +1,20 @@
#!/usr/bin/python
"""
Copyright 2013 Google Inc.

Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.

Entry point that runs every Python-based self-test; any test failure
surfaces as an uncaught exception (nonzero exit status).
"""

import skimage_self_test


def main():
    """Invoke each Python self-test module in turn, letting failures raise."""
    skimage_self_test.main()


if __name__ == '__main__':
    main()

Просмотреть файл

@ -35,9 +35,8 @@ def PickBinaryPath(base_dir):
def DieIfFilesMismatch(expected, actual):
    """Raise an exception if two files do not have matching content.

    Args:
      expected: path to the file holding the expected content.
      actual: path to the file holding the actual content.

    Raises:
      Exception: if the two files differ.

    NOTE(review): filecmp.cmp() is used with its default shallow=True, so
    files whose os.stat() signatures (size/mtime) match may be reported
    equal without a byte-level comparison — presumably acceptable for
    these self-tests; confirm if exact content comparison is required.
    """
    # The diff-garbled original contained both the old Python-2
    # print/exit(1) body and the new raise body back to back, leaving the
    # raise unreachable; only the exception-raising version is kept here.
    if not filecmp.cmp(expected, actual):
        raise Exception("Error: file mismatch! expected=%s , actual=%s" % (
            expected, actual))
def test_invalid_file(file_dir, skimage_binary):
""" Test the return value of skimage when an invalid file is decoded.
@ -51,8 +50,7 @@ def test_invalid_file(file_dir, skimage_binary):
args = [skimage_binary, "--readPath", invalid_file]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
raise Exception("'%s' should have reported failure!" % " ".join(args))
# Directory holding all expectations files
expectations_dir = os.path.join(file_dir, "skimage", "input", "bad-images")
@ -64,8 +62,7 @@ def test_invalid_file(file_dir, skimage_binary):
"--readExpectationsPath", incorrect_expectations]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
raise Exception("'%s' should have reported failure!" % " ".join(args))
# Empty expectations:
empty_expectations = os.path.join(expectations_dir, "empty-results.json")
@ -78,8 +75,8 @@ def test_invalid_file(file_dir, skimage_binary):
# in the output. That test could be passed if the output changed so
# "Missing" never appears. This ensures that an error is not missed if
# that happens.
print "skimage output changed! This may cause other self tests to fail!"
exit(1)
raise Exception(
"skimage output changed! This may cause other self tests to fail!")
# Ignore failure:
ignore_expectations = os.path.join(expectations_dir, "ignore-results.json")
@ -92,8 +89,8 @@ def test_invalid_file(file_dir, skimage_binary):
# appear in the output. That test could be passed if the output changed
# so "failures" never appears. This ensures that an error is not missed
# if that happens.
print "skimage output changed! This may cause other self tests to fail!"
exit(1)
raise Exception(
"skimage output changed! This may cause other self tests to fail!")
def test_incorrect_expectations(file_dir, skimage_binary):
""" Test that comparing to incorrect expectations fails, unless
@ -110,8 +107,7 @@ def test_incorrect_expectations(file_dir, skimage_binary):
incorrect_results]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
raise Exception("'%s' should have reported failure!" % " ".join(args))
ignore_results = os.path.join(expectations_dir, "ignore-failures.json")
subprocess.check_call([skimage_binary, "--readPath", valid_file,
@ -153,17 +149,13 @@ def main():
# the expectations file was created from this same image. (It will print
# "Missing" in this case before listing the missing expectations).
if "Missing" in output:
print "Expectations file was missing expectations!"
print output
exit(1)
raise Exception("Expectations file was missing expectations: %s" % output)
# Again, skimage would succeed if there were known failures (and print
# "failures"), but there should be no failures, since the file just
# created did not include failures to ignore.
if "failures" in output:
print "Image failed!"
print output
exit(1)
raise Exception("Image failed: %s" % output)
test_incorrect_expectations(file_dir=file_dir,