#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function

import copy
import json
import mozdebug
import os
import pipes
import random
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import traceback

import six

from argparse import Namespace
from collections import defaultdict, deque, namedtuple
from datetime import datetime, timedelta
from distutils import dir_util
from functools import partial
from multiprocessing import cpu_count
from subprocess import Popen, PIPE, STDOUT
from tempfile import mkdtemp, gettempdir
from threading import (
    Timer,
    Thread,
    Event,
    current_thread,
)

try:
    import psutil

    HAVE_PSUTIL = True
except Exception:
    HAVE_PSUTIL = False

from xpcshellcommandline import parser_desktop

SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))

try:
    from mozbuild.base import MozbuildObject

    build = MozbuildObject.from_environment(cwd=SCRIPT_DIR)
except ImportError:
    build = None

HARNESS_TIMEOUT = 5 * 60

# benchmarking on tbpl revealed that this works best for now
NUM_THREADS = int(cpu_count() * 4)
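
# (Oversubscribing the cores like this is presumably viable because each
# worker thread spends most of its time blocked on its xpcshell child
# process rather than executing Python code.)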

EXPECTED_LOG_ACTIONS = set(
    [
        "test_status",
        "log",
    ]
)
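
# For illustration, a structured line the harness consumes looks roughly like
# (the exact field set depends on the mozlog version in use):
#   {"action": "test_status", "test": "test_foo.js", "subtest": "check",
#    "status": "PASS", "expected": "PASS"}
# Output that does not parse as one of these actions is treated as plain
# process output from the test.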

# --------------------------------------------------------------
# TODO: this is a hack for mozbase without virtualenv, remove with bug 849900
#
here = os.path.dirname(__file__)
mozbase = os.path.realpath(os.path.join(os.path.dirname(here), "mozbase"))

if os.path.isdir(mozbase):
    for package in os.listdir(mozbase):
        sys.path.append(os.path.join(mozbase, package))

from manifestparser import TestManifest
from manifestparser.filters import chunk_by_slice, tags, pathprefix, failures
from manifestparser.util import normsep
from mozlog import commandline

import mozcrash
import mozfile
import mozinfo
from mozprofile import Profile
from mozprofile.cli import parse_preferences
from mozrunner.utils import get_stack_fixer_function

# --------------------------------------------------------------

# TODO: perhaps this should be in a more generally shared location?
# This regex matches all of the C0 and C1 control characters
# (U+0000 through U+001F; U+007F; U+0080 through U+009F),
# except TAB (U+0009), LF (U+000A) and CR (U+000D). It also matches
# backslash (U+005C), so backslashes in the input get escaped too.
# A raw string is deliberately not used.
_cleanup_encoding_re = re.compile(u"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f\\\\]")


def _cleanup_encoding_repl(m):
    c = m.group(0)
    return "\\\\" if c == "\\" else "\\x{0:02X}".format(ord(c))


def cleanup_encoding(s):
    """S is either a byte or unicode string.  Either way it may
    contain control characters, unpaired surrogates, reserved code
    points, etc.  If it is a byte string, it is assumed to be
    UTF-8, but it may not be *correct* UTF-8.  Return a
    sanitized unicode object."""
    if not isinstance(s, six.string_types):
        if isinstance(s, six.binary_type):
            return six.ensure_str(s)
        else:
            return six.text_type(s)
    if isinstance(s, six.binary_type):
        s = s.decode("utf-8", "replace")
    # Replace all C0 and C1 control characters with \xNN escapes.
    return _cleanup_encoding_re.sub(_cleanup_encoding_repl, s)


def ensure_bytes(value, encoding="utf-8"):
    if isinstance(value, six.text_type):
        return value.encode(encoding)
    return value


def ensure_unicode(value, encoding="utf-8"):
    if isinstance(value, six.binary_type):
        return value.decode(encoding)
    return value


def ensure_subprocess_env(env, encoding="utf-8"):
    """Ensure the environment is in the correct format for the `subprocess`
    module.

    This will convert all keys and values to bytes on Python 2, and text on
    Python 3.

    Args:
        env (dict): Environment to ensure.
        encoding (str): Encoding to use when converting to/from bytes/text
            (default: utf-8).
    """
    ensure = ensure_bytes if sys.version_info[0] < 3 else ensure_unicode
    return {ensure(k, encoding): ensure(v, encoding) for k, v in six.iteritems(env)}
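
# For example, on Python 3:
#   ensure_subprocess_env({b"MOZ_LOG": b"all:5"}) -> {"MOZ_LOG": "all:5"}
# while on Python 2 the same call would instead coerce text to bytes.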


""" Control-C handling """
gotSIGINT = False


def markGotSIGINT(signum, stackFrame):
    global gotSIGINT
    gotSIGINT = True
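
# (The harness is expected to install this via
# signal.signal(signal.SIGINT, markGotSIGINT); the handler only sets a flag
# that the scheduling loop polls, so an in-flight test is not interrupted.)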


class XPCShellTestThread(Thread):
    def __init__(
        self, test_object, retry=True, verbose=False, usingTSan=False, **kwargs
    ):
        Thread.__init__(self)
        self.daemon = True

        self.test_object = test_object
        self.retry = retry
        self.verbose = verbose
        self.usingTSan = usingTSan

        self.appPath = kwargs.get("appPath")
        self.xrePath = kwargs.get("xrePath")
        self.utility_path = kwargs.get("utility_path")
        self.testingModulesDir = kwargs.get("testingModulesDir")
        self.debuggerInfo = kwargs.get("debuggerInfo")
        self.jsDebuggerInfo = kwargs.get("jsDebuggerInfo")
        self.pluginsPath = kwargs.get("pluginsPath")
        self.httpdJSPath = kwargs.get("httpdJSPath")
        self.headJSPath = kwargs.get("headJSPath")
        self.testharnessdir = kwargs.get("testharnessdir")
        self.profileName = kwargs.get("profileName")
        self.singleFile = kwargs.get("singleFile")
        self.env = copy.deepcopy(kwargs.get("env"))
        self.symbolsPath = kwargs.get("symbolsPath")
        self.logfiles = kwargs.get("logfiles")
        self.xpcshell = kwargs.get("xpcshell")
        self.xpcsRunArgs = kwargs.get("xpcsRunArgs")
        self.failureManifest = kwargs.get("failureManifest")
        self.jscovdir = kwargs.get("jscovdir")
        self.stack_fixer_function = kwargs.get("stack_fixer_function")
        self._rootTempDir = kwargs.get("tempDir")
        self.cleanup_dir_list = kwargs.get("cleanup_dir_list")
        self.pStdout = kwargs.get("pStdout")
        self.pStderr = kwargs.get("pStderr")
        self.keep_going = kwargs.get("keep_going")
        self.log = kwargs.get("log")
        self.app_dir_key = kwargs.get("app_dir_key")
        self.interactive = kwargs.get("interactive")
        self.rootPrefsFile = kwargs.get("rootPrefsFile")
        self.extraPrefs = kwargs.get("extraPrefs")
        self.verboseIfFails = kwargs.get("verboseIfFails")
        self.headless = kwargs.get("headless")
        self.runFailures = kwargs.get("runFailures")
        self.timeoutAsPass = kwargs.get("timeoutAsPass")
        self.crashAsPass = kwargs.get("crashAsPass")
        if self.runFailures:
            retry = False

        # Default the test prefsFile to the rootPrefsFile.
        self.prefsFile = self.rootPrefsFile

        # Only one of these will be set to 1; the harness adds them to
        # its totals.
        self.passCount = 0
        self.todoCount = 0
        self.failCount = 0

        # Context for output processing
        self.output_lines = []
        self.has_failure_output = False
        self.saw_proc_start = False
        self.saw_proc_end = False
        self.command = None
        self.harness_timeout = kwargs.get("harness_timeout")
        self.timedout = False

        # event from main thread to signal work done
        self.event = kwargs.get("event")
        self.done = False  # explicitly set flag so we don't rely on thread.isAlive
|
2013-07-20 06:27:14 +04:00
|
|
|
|
|
|
|
def run(self):
|
|
|
|
try:
|
|
|
|
self.run_test()
|
|
|
|
except Exception as e:
|
|
|
|
self.exception = e
|
|
|
|
self.traceback = traceback.format_exc()
|
|
|
|
else:
|
|
|
|
self.exception = None
|
|
|
|
self.traceback = None
|
2013-08-28 22:28:39 +04:00
|
|
|
if self.retry:
|
2014-10-22 23:53:42 +04:00
|
|
|
self.log.info(
|
|
|
|
"%s failed or timed out, will retry." % self.test_object["id"]
|
|
|
|
)
|
2013-09-02 00:02:51 +04:00
|
|
|
self.done = True
|
2013-07-20 06:27:14 +04:00
|
|
|
self.event.set()

    def kill(self, proc):
        """
        Simple wrapper to kill a process.
        On a remote system, this is overloaded to handle remote process communication.
        """
        return proc.kill()

    def removeDir(self, dirname):
        """
        Simple wrapper to remove (recursively) a given directory.
        On a remote system, we need to overload this to work on the remote filesystem.
        """
        mozfile.remove(dirname)

    def poll(self, proc):
        """
        Simple wrapper to check if a process has terminated.
        On a remote system, this is overloaded to handle remote process communication.
        """
        return proc.poll()

    def createLogFile(self, test_file, stdout):
        """
        For a given test file and stdout buffer, create a log file.
        On a remote system we have to fix the test name since it can contain directories.
        """
        with open(test_file + ".log", "w") as f:
            f.write(stdout)

    def getReturnCode(self, proc):
        """
        Simple wrapper to get the return code for a given process.
        On a remote system we overload this to work with the remote process management.
        """
        if proc is not None and hasattr(proc, "returncode"):
            return proc.returncode
        return -1

    def communicate(self, proc):
        """
        Simple wrapper to communicate with a process.
        On a remote system, this is overloaded to handle remote process communication.
        """
        # Processing of incremental output put here to
        # sidestep issues on remote platforms, where what we know
        # as proc is a file pulled off of a device.
        if proc.stdout:
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                self.process_line(line)

            if self.saw_proc_start and not self.saw_proc_end:
                self.has_failure_output = True

        return proc.communicate()

    def launchProcess(self, cmd, stdout, stderr, env, cwd, timeout=None):
        """
        Simple wrapper to launch a process.
        On a remote system, this is more complex and we need to overload this function.
        """
        # timeout is needed by remote xpcshell to extend the
        # remote device timeout. It is not used in this function.
        if six.PY3:
            cwd = six.ensure_str(cwd)
            for i in range(len(cmd)):
                cmd[i] = six.ensure_str(cmd[i])

        env = ensure_subprocess_env(env)
        if HAVE_PSUTIL:
            popen_func = psutil.Popen
        else:
            popen_func = Popen

        cleanup_hack = None
        if mozinfo.isWin and sys.version_info[0] == 3 and sys.version_info < (3, 7, 5):
            # Hack to work around https://bugs.python.org/issue37380
            cleanup_hack = subprocess._cleanup

        try:
            if cleanup_hack:
                subprocess._cleanup = lambda: None
            proc = popen_func(cmd, stdout=stdout, stderr=stderr, env=env, cwd=cwd)
        finally:
            if cleanup_hack:
                subprocess._cleanup = cleanup_hack
        return proc

    def checkForCrashes(self, dump_directory, symbols_path, test_name=None):
        """
        Simple wrapper to check for crashes.
        On a remote system, this is more complex and we need to overload this function.
        """
        quiet = False
        if self.crashAsPass:
            quiet = True

        return mozcrash.log_crashes(
            self.log, dump_directory, symbols_path, test=test_name, quiet=quiet
        )
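
    # (With --crash-as-pass a crash is the expected outcome, so quiet=True is
    # used to keep the crash logging terse while crashes are still detected.)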

    def logCommand(self, name, completeCmd, testdir):
        self.log.info("%s | full command: %r" % (name, completeCmd))
        self.log.info("%s | current directory: %r" % (name, testdir))
        # Show only those environment variables that are changed from
        # the ambient environment.
        changedEnv = set("%s=%s" % i for i in six.iteritems(self.env)) - set(
            "%s=%s" % i for i in six.iteritems(os.environ)
        )
        self.log.info("%s | environment: %s" % (name, list(changedEnv)))
        shell_command_tokens = [
            pipes.quote(tok) for tok in list(changedEnv) + completeCmd
        ]
        self.log.info(
            "%s | as shell command: (cd %s; %s)"
            % (name, pipes.quote(testdir), " ".join(shell_command_tokens))
        )
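
    # A logged line then looks roughly like (paths and variables hypothetical):
    #   test_foo.js | as shell command: (cd /src/tests/unit; MOZ_LOG=all:5
    #   /obj/dist/bin/xpcshell -g /obj/dist/bin ... -f head.js ...)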

    def killTimeout(self, proc):
        if proc is not None and hasattr(proc, "pid"):
            mozcrash.kill_and_get_minidump(
                proc.pid, self.tempDir, utility_path=self.utility_path
            )
        else:
            self.log.info("not killing -- proc or pid unknown")

    def postCheck(self, proc):
        """Checks for a still-running test process, kills it and fails the test if found.
        We can sometimes get here before the process has terminated, which would
        cause removeDir() to fail - so check for the process and kill it if needed.
        """
        if proc and self.poll(proc) is None:
            if HAVE_PSUTIL:
                try:
                    self.kill(proc)
                except psutil.NoSuchProcess:
                    pass
            else:
                self.kill(proc)
            message = "%s | Process still running after test!" % self.test_object["id"]
            if self.retry:
                self.log.info(message)
                return

            self.log.error(message)
            self.log_full_output()
            self.failCount = 1

    def testTimeout(self, proc):
        if self.test_object["expected"] == "pass":
            expected = "PASS"
        else:
            expected = "FAIL"

        if self.retry:
            self.log.test_end(
                self.test_object["id"],
                "TIMEOUT",
                expected="TIMEOUT",
                message="Test timed out",
            )
        else:
            result = "TIMEOUT"
            if self.timeoutAsPass:
                expected = "FAIL"
                result = "FAIL"
            self.failCount = 1
            self.log.test_end(
                self.test_object["id"],
                result,
                expected=expected,
                message="Test timed out",
            )
            self.log_full_output()

        self.done = True
        self.timedout = True
        self.killTimeout(proc)
        self.log.info("xpcshell return code: %s" % self.getReturnCode(proc))
        self.postCheck(proc)
        self.clean_temp_dirs(self.test_object["path"])
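
    # (With --timeout-as-pass the timeout above is reported as an expected
    # FAIL, so a test that is known to time out does not fail the suite.)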

    def updateTestPrefsFile(self):
        # If the manifest file has some additional prefs, merge the
        # prefs set in the user.js file stored in the _rootTempDir
        # with the prefs from the manifest and the prefs specified
        # in the extraPrefs option.
        if "prefs" in self.test_object:
            # Merge the user preferences in a fake profile dir in a
            # local temporary dir (self.tempDir is the remoteTmpDir
            # for the RemoteXPCShellTestThread subclass and so we
            # can't use that tempDir here).
            localTempDir = mkdtemp(prefix="xpc-other-", dir=self._rootTempDir)

            filename = "user.js"
            interpolation = {"server": "dummyserver"}
            profile = Profile(profile=localTempDir, restore=False)
            profile.merge(self._rootTempDir, interpolation=interpolation)

            prefs = self.test_object["prefs"].strip().split()
            name = self.test_object["id"]
            if self.verbose:
                self.log.info(
                    "%s: Per-test extra prefs will be set:\n {}".format(
                        "\n ".join(prefs)
                    )
                    % name
                )

            profile.set_preferences(parse_preferences(prefs), filename=filename)
            # Make sure that the extra prefs from the command line override
            # any prefs inherited from the shared profile data or the manifest
            # prefs.
            profile.set_preferences(
                parse_preferences(self.extraPrefs), filename=filename
            )
            return os.path.join(profile.profile, filename)

        # Return the root prefsFile if there are no other prefs to merge.
        return self.rootPrefsFile
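
    # For illustration, a manifest entry such as
    #   prefs = my.pref.one=true my.pref.two=2
    # arrives here as self.test_object["prefs"]; it is split on whitespace and
    # each "name=value" token is handed to parse_preferences().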

    def buildCmdTestFile(self, name):
        """
        Build the command line arguments for the test file.
        On a remote system, this may be overloaded to use a remote path structure.
        """
        return ["-e", 'const _TEST_FILE = ["%s"];' % name.replace("\\", "/")]

    def setupTempDir(self):
        tempDir = mkdtemp(prefix="xpc-other-", dir=self._rootTempDir)
        self.env["XPCSHELL_TEST_TEMP_DIR"] = tempDir
        if self.interactive:
            self.log.info("temp dir is %s" % tempDir)
        return tempDir

    def setupPluginsDir(self):
        if not os.path.isdir(self.pluginsPath):
            return None

        pluginsDir = mkdtemp(prefix="xpc-plugins-", dir=self._rootTempDir)
        retries = 0
        while not os.path.isdir(pluginsDir) and retries < 5:
            self.log.info("plugins temp directory %s missing; waiting..." % pluginsDir)
            time.sleep(1)
            retries += 1
        # shutil.copytree requires dst to not exist. Deleting the tempdir
        # would make a race condition possible in a concurrent environment,
        # so we are using dir_util.copy_tree, which accepts an existing dst.
        dir_util.copy_tree(self.pluginsPath, pluginsDir)
        if self.interactive:
            self.log.info("plugins dir is %s" % pluginsDir)
        return pluginsDir

    def setupProfileDir(self):
        """
        Create a temporary folder for the profile and set appropriate environment variables.
        When running check-interactive and check-one, the directory is well-defined and
        retained for inspection once the tests complete.

        On a remote system, this may be overloaded to use a remote path structure.
        """
        if self.interactive or self.singleFile:
            profileDir = os.path.join(gettempdir(), self.profileName, "xpcshellprofile")
            try:
                # This could be left over from previous runs
                self.removeDir(profileDir)
            except Exception:
                pass
            os.makedirs(profileDir)
        else:
            profileDir = mkdtemp(prefix="xpc-profile-", dir=self._rootTempDir)
        self.env["XPCSHELL_TEST_PROFILE_DIR"] = profileDir
        if self.interactive or self.singleFile:
            self.log.info("profile dir is %s" % profileDir)
        return profileDir
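
    # (head.js locates these directories through the XPCSHELL_TEST_PROFILE_DIR
    # and XPCSHELL_TEST_TEMP_DIR environment variables set above; see
    # do_get_profile() in the xpcshell head.js.)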

    def setupMozinfoJS(self):
        mozInfoJSPath = os.path.join(self.profileDir, "mozinfo.json")
        mozInfoJSPath = mozInfoJSPath.replace("\\", "\\\\")
        mozinfo.output_to_file(mozInfoJSPath)
        return mozInfoJSPath

    def buildCmdHead(self):
        """
        Build the command line arguments for the head files,
        along with the address of the webserver which some tests require.

        On a remote system, this is overloaded to resolve quoting issues over a
        secondary command line.
        """
        headfiles = self.getHeadFiles(self.test_object)
        cmdH = ", ".join(['"' + f.replace("\\", "/") + '"' for f in headfiles])

        dbgport = 0 if self.jsDebuggerInfo is None else self.jsDebuggerInfo.port

        return [
            "-e",
            "const _HEAD_FILES = [%s];" % cmdH,
            "-e",
            "const _JSDEBUGGER_PORT = %d;" % dbgport,
        ]

    def getHeadFiles(self, test):
        """Obtain the list of head files for a test.  Returns a list of paths."""

        def sanitize_list(s, kind):
            for f in s.strip().split(" "):
                f = f.strip()
                if len(f) < 1:
                    continue

                path = os.path.normpath(os.path.join(test["here"], f))
                if not os.path.exists(path):
                    raise Exception("%s file does not exist: %s" % (kind, path))

                if not os.path.isfile(path):
                    raise Exception("%s file is not a file: %s" % (kind, path))

                yield path

        headlist = test.get("head", "")
        return list(sanitize_list(headlist, "head"))
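
    # e.g. a manifest line "head = head_helpers.js head_setup.js" yields the
    # two paths resolved against the test's directory, each checked to exist.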

    def buildXpcsCmd(self):
        """
        Load the root head.js file as the first file in our test path, before
        other head and test files. On a remote system, this is overloaded to
        add additional command line arguments.
        """
        # - NOTE: if you rename/add any of the constants set here, update
        #   do_load_child_test_harness() in head.js
        if not self.appPath:
            self.appPath = self.xrePath

        xpcsCmd = [
            self.xpcshell,
            "-g",
            self.xrePath,
            "-a",
            self.appPath,
            "-m",
            "-e",
            'const _HEAD_JS_PATH = "%s";' % self.headJSPath,
            "-e",
            'const _MOZINFO_JS_PATH = "%s";' % self.mozInfoJSPath,
            "-e",
            'const _PREFS_FILE = "%s";' % self.prefsFile.replace("\\", "\\\\"),
        ]

        if self.testingModulesDir:
            # Escape backslashes in string literal.
            sanitized = self.testingModulesDir.replace("\\", "\\\\")
            xpcsCmd.extend(["-e", 'const _TESTING_MODULES_DIR = "%s";' % sanitized])

        xpcsCmd.extend(["-f", os.path.join(self.testharnessdir, "head.js")])

        if self.debuggerInfo:
            xpcsCmd = [self.debuggerInfo.path] + self.debuggerInfo.args + xpcsCmd

        # Automation doesn't specify a pluginsPath and xpcshell defaults to
        # $APPDIR/plugins. We do the same here so we can carry on with
        # setting up every test with its own plugins directory.
        if not self.pluginsPath:
            self.pluginsPath = os.path.join(self.appPath, "plugins")

        self.pluginsDir = self.setupPluginsDir()
        if self.pluginsDir:
            xpcsCmd.extend(["-p", self.pluginsDir])

        return xpcsCmd
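
    # The assembled command then looks roughly like (paths hypothetical):
    #   /obj/dist/bin/xpcshell -g <xrePath> -a <appPath> -m \
    #     -e 'const _HEAD_JS_PATH = "...";' -e 'const _MOZINFO_JS_PATH = "...";' \
    #     -e 'const _PREFS_FILE = "...";' -f <testharnessdir>/head.js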

    def cleanupDir(self, directory, name):
        if not os.path.exists(directory):
            return

        # up to TRY_LIMIT attempts (one every second), because
        # the Windows filesystem is slow to react to the changes
        TRY_LIMIT = 25
        try_count = 0
        while try_count < TRY_LIMIT:
            try:
                self.removeDir(directory)
            except OSError:
                self.log.info("Failed to remove directory: %s. Waiting." % directory)
                # We suspect the filesystem may still be making changes. Wait a
                # little bit and try again.
                time.sleep(1)
                try_count += 1
            else:
                # removed fine
                return

        # we try cleaning up again later at the end of the run
        self.cleanup_dir_list.append(directory)

    def clean_temp_dirs(self, name):
        # We don't want to delete the profile when running check-interactive
        # or check-one.
        if self.profileDir and not self.interactive and not self.singleFile:
            self.cleanupDir(self.profileDir, name)

        self.cleanupDir(self.tempDir, name)

        if self.pluginsDir:
            self.cleanupDir(self.pluginsDir, name)

    def parse_output(self, output):
        """Parses process output for structured messages and saves output as it is
        read.  Sets self.has_failure_output in case of evidence of a failure"""
        for line_string in output.splitlines():
            self.process_line(line_string)

        if self.saw_proc_start and not self.saw_proc_end:
            self.has_failure_output = True

    def fix_text_output(self, line):
        line = cleanup_encoding(line)
        if self.stack_fixer_function is not None:
            line = self.stack_fixer_function(line)

        if isinstance(line, bytes):
            line = line.decode("utf-8")
        return line

    def log_line(self, line):
        """Log a line of output (either a parsed json object or text output
        from the test process)."""
        if isinstance(line, six.string_types) or isinstance(line, bytes):
            line = self.fix_text_output(line).rstrip("\r\n")
            self.log.process_output(self.proc_ident, line, command=self.command)
        else:
            if "message" in line:
                line["message"] = self.fix_text_output(line["message"])
            if "xpcshell_process" in line:
                line["thread"] = " ".join(
                    [current_thread().name, line["xpcshell_process"]]
                )
            else:
                line["thread"] = current_thread().name
            self.log.log_raw(line)

    def log_full_output(self):
        """Logs any buffered output from the test process, and clears the buffer."""
        if not self.output_lines:
            return
        self.log.info(">>>>>>>")
        for line in self.output_lines:
            self.log_line(line)
        self.log.info("<<<<<<<")
        self.output_lines = []

    def report_message(self, message):
        """Stores or logs a json log message in mozlog format."""
        if self.verbose:
            self.log_line(message)
        else:
            self.output_lines.append(message)

    def process_line(self, line_string):
        """Parses a single line of output, determining its significance and
        reporting a message.
        """
        if not line_string.strip():
            return

        try:
            line_object = json.loads(line_string)
            if not isinstance(line_object, dict):
                self.report_message(line_string)
                return
        except ValueError:
            self.report_message(line_string)
            return

        if (
            "action" not in line_object
            or line_object["action"] not in EXPECTED_LOG_ACTIONS
        ):
            # The test process output JSON.
            self.report_message(line_string)
            return

        action = line_object["action"]

        self.has_failure_output = (
            self.has_failure_output
            or "expected" in line_object
            or (action == "log" and line_object["level"] == "ERROR")
        )

        self.report_message(line_object)

        if action == "log" and line_object["message"] == "CHILD-TEST-STARTED":
            self.saw_proc_start = True
        elif action == "log" and line_object["message"] == "CHILD-TEST-COMPLETED":
            self.saw_proc_end = True

    def run_test(self):
        """Run an individual xpcshell test."""
        global gotSIGINT

        name = self.test_object["id"]
        path = self.test_object["path"]

        # Check for skipped tests
        if "disabled" in self.test_object:
            message = self.test_object["disabled"]
            if not message:
                message = "disabled from xpcshell manifest"
            self.log.test_start(name)
            self.log.test_end(name, "SKIP", message=message)

            self.retry = False
            self.keep_going = True
            return

        # Check for known-fail tests
        expect_pass = self.test_object["expected"] == "pass"

        # By default self.appPath will equal the gre dir. If specified in the
        # xpcshell.ini file, set a different app dir for this test.
        if self.app_dir_key and self.app_dir_key in self.test_object:
            rel_app_dir = self.test_object[self.app_dir_key]
            rel_app_dir = os.path.join(self.xrePath, rel_app_dir)
            self.appPath = os.path.abspath(rel_app_dir)
        else:
            self.appPath = None

        test_dir = os.path.dirname(path)

        # Create a profile and a temp dir that the JS harness can stick
        # a profile and temporary data in
        self.profileDir = self.setupProfileDir()
        self.tempDir = self.setupTempDir()
        self.mozInfoJSPath = self.setupMozinfoJS()

        # Setup per-manifest prefs and write them into the tempdir.
        self.prefsFile = self.updateTestPrefsFile()

        # The order of the command line is important:
        # 1) Arguments for xpcshell itself
        self.command = self.buildXpcsCmd()

        # 2) Arguments for the head files
        self.command.extend(self.buildCmdHead())

        # 3) Arguments for the test file
        self.command.extend(self.buildCmdTestFile(path))
        self.command.extend(["-e", 'const _TEST_NAME = "%s";' % name])

        # 4) Arguments for code coverage
        if self.jscovdir:
            self.command.extend(
                ["-e", 'const _JSCOV_DIR = "%s";' % self.jscovdir.replace("\\", "/")]
            )

        # 5) Runtime arguments
        if "debug" in self.test_object:
            self.command.append("-d")

        self.command.extend(self.xpcsRunArgs)

        if self.test_object.get("dmd") == "true":
            self.env["PYTHON"] = sys.executable
            self.env["BREAKPAD_SYMBOLS_PATH"] = self.symbolsPath

        if self.test_object.get("subprocess") == "true":
            self.env["PYTHON"] = sys.executable

        if (
            self.test_object.get("headless", "true" if self.headless else None)
            == "true"
        ):
            self.env["MOZ_HEADLESS"] = "1"
            self.env["DISPLAY"] = "77"  # Set a fake display.

        testTimeoutInterval = self.harness_timeout
        # Allow a test to request a multiple of the timeout if it is expected
        # to take a long time.
        if "requesttimeoutfactor" in self.test_object:
            testTimeoutInterval *= int(self.test_object["requesttimeoutfactor"])

        testTimer = None
        if not self.interactive and not self.debuggerInfo and not self.jsDebuggerInfo:
            testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(proc))
            testTimer.start()

        proc = None
        process_output = None

        try:
            self.log.test_start(name)
            if self.verbose:
                self.logCommand(name, self.command, test_dir)

            proc = self.launchProcess(
                self.command,
                stdout=self.pStdout,
                stderr=self.pStderr,
                env=self.env,
                cwd=test_dir,
                timeout=testTimeoutInterval,
            )

            if hasattr(proc, "pid"):
                self.proc_ident = proc.pid
            else:
                # On mobile, "proc" is just a file.
                self.proc_ident = name

            if self.interactive:
                self.log.info("%s | Process ID: %d" % (name, self.proc_ident))

            # Communicate returns a tuple of (stdout, stderr), however we always
            # redirect stderr to stdout, so the second element is ignored.
            process_output, _ = self.communicate(proc)

            if self.interactive:
                # Not sure what else to do here...
                self.keep_going = True
                return

            if testTimer:
                testTimer.cancel()

            if process_output:
                # For the remote case, stdout is not yet depleted, so we parse
                # it here all at once.
                self.parse_output(process_output)

            return_code = self.getReturnCode(proc)

            # TSan'd processes return 66 if races are detected. This isn't
            # good in the sense that there's no way to distinguish between
            # a process that would normally have returned zero but has races,
            # and a race-free process that returns 66. But I don't see how
            # to do better. This ambiguity is at least constrained to the
            # with-TSan case. It doesn't affect normal builds.
            #
            # This also assumes that the magic value 66 isn't overridden by
            # a TSAN_OPTIONS=exitcode=<number> environment variable setting.
            #
            TSAN_EXIT_CODE_WITH_RACES = 66

            return_code_ok = return_code == 0 or (
                self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES
            )
            passed = (not self.has_failure_output) and return_code_ok

            status = "PASS" if passed else "FAIL"
            expected = "PASS" if expect_pass else "FAIL"
            message = "xpcshell return code: %d" % return_code

            if self.timedout:
                return

            if status != expected:
                if self.retry:
                    self.log.test_end(
                        name,
                        status,
                        expected=status,
                        message="Test failed or timed out, will retry",
                    )
                    self.clean_temp_dirs(path)
                    if self.verboseIfFails and not self.verbose:
                        self.log_full_output()
                    return

                self.log.test_end(name, status, expected=expected, message=message)
                self.log_full_output()

                self.failCount += 1

                if self.failureManifest:
                    with open(self.failureManifest, "a") as f:
                        f.write("[%s]\n" % self.test_object["path"])
                        for k, v in self.test_object.items():
                            f.write("%s = %s\n" % (k, v))
            else:
                # If TSan reports a race, dump the output, else we can't
                # diagnose what the problem was. See comments above about
                # the significance of TSAN_EXIT_CODE_WITH_RACES.
                if self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES:
                    self.log_full_output()

                self.log.test_end(name, status, expected=expected, message=message)
                if self.verbose:
                    self.log_full_output()

                self.retry = False

                if expect_pass:
                    self.passCount = 1
                else:
                    self.todoCount = 1

            if self.checkForCrashes(self.tempDir, self.symbolsPath, test_name=name):
                if self.retry:
                    self.clean_temp_dirs(path)
                    return

                # If we assert during shutdown there's a chance the test has passed
                # but we haven't logged full output, so do so here.
                self.log_full_output()
                self.failCount = 1

            if self.logfiles and process_output:
                self.createLogFile(name, process_output)

        finally:
            self.postCheck(proc)
            self.clean_temp_dirs(path)

        if gotSIGINT:
            self.log.error("Received SIGINT (control-C) during test execution")
            if self.keep_going:
                gotSIGINT = False
            else:
                self.keep_going = False
                return

        self.keep_going = True


class XPCShellTests(object):
    def __init__(self, log=None):
        """ Initializes node status and logger. """
        self.log = log
        self.harness_timeout = HARNESS_TIMEOUT
        self.nodeProc = {}
        self.http3ServerProc = {}

    def getTestManifest(self, manifest):
        if isinstance(manifest, TestManifest):
            return manifest
        elif manifest is not None:
            manifest = os.path.normpath(os.path.abspath(manifest))
            if os.path.isfile(manifest):
                return TestManifest([manifest], strict=True)
            else:
                ini_path = os.path.join(manifest, "xpcshell.ini")
        else:
            ini_path = os.path.join(SCRIPT_DIR, "tests", "xpcshell.ini")

        if os.path.exists(ini_path):
            return TestManifest([ini_path], strict=True)
        else:
            self.log.error(
                "Failed to find manifest at %s; use --manifest "
                "to set path explicitly." % ini_path
            )
            sys.exit(1)
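
    # Resolution order, for reference: a TestManifest instance is used as-is;
    # a file path is loaded directly; a directory is assumed to hold an
    # xpcshell.ini; and with no manifest given at all, the harness falls back
    # to SCRIPT_DIR/tests/xpcshell.ini.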

    def normalizeTest(self, root, test_object):
        path = test_object.get("file_relpath", test_object["relpath"])
        if "dupe-manifest" in test_object and "ancestor_manifest" in test_object:
            test_object["id"] = "%s:%s" % (
                os.path.basename(test_object["ancestor_manifest"]),
                path,
            )
        else:
            test_object["id"] = path

        if root:
            test_object["manifest"] = os.path.relpath(test_object["manifest"], root)

        if os.sep != "/":
            for key in ("id", "manifest"):
                test_object[key] = test_object[key].replace(os.sep, "/")

        return test_object
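
    # Illustrative example (hypothetical values): a test with relpath
    # "netwerk/test/unit/test_foo.js" that is marked dupe-manifest and has
    # ancestor_manifest "parent.ini" gets the id
    # "parent.ini:netwerk/test/unit/test_foo.js"; otherwise the id is just the
    # relpath, with os.sep normalized to "/".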

    def buildTestList(self, test_tags=None, test_paths=None, verify=False):
        """Reads the xpcshell.ini manifest and sets self.alltests to an array.

        Given the parameters, this method compiles a list of tests to be run
        that matches the criteria set by those parameters.

        If any chunking of tests is to occur, it is also done in this method.

        If no tests are added to the list of tests to be run, an error
        is logged and sys.exit() is called.

        Args:
            test_tags (list, optional): list of strings.
            test_paths (list, optional): list of strings derived from the command
                                         line argument provided by user, specifying
                                         tests to be run.
            verify (bool, optional): whether tests are being run in --verify mode.
        """
        if test_paths is None:
            test_paths = []

        mp = self.getTestManifest(self.manifest)

        root = mp.rootdir
        if build and not root:
            root = build.topsrcdir
        normalize = partial(self.normalizeTest, root)

        filters = []
        if test_tags:
            filters.append(tags(test_tags))

        path_filter = None
        if test_paths:
            path_filter = pathprefix(test_paths)
            filters.append(path_filter)

        noDefaultFilters = False
        if self.runFailures:
            filters.append(failures(self.runFailures))
            noDefaultFilters = True

        if self.totalChunks > 1:
            filters.append(chunk_by_slice(self.thisChunk, self.totalChunks))

        try:
            self.alltests = list(
                map(
                    normalize,
                    mp.active_tests(
                        filters=filters,
                        noDefaultFilters=noDefaultFilters,
                        **mozinfo.info
                    ),
                )
            )
        except TypeError:
            sys.stderr.write("*** offending mozinfo.info: %s\n" % repr(mozinfo.info))
            raise

        if path_filter and path_filter.missing:
            self.log.warning(
                "The following path(s) didn't resolve any tests:\n {}".format(
                    " \n".join(sorted(path_filter.missing))
                )
            )

        if len(self.alltests) == 0:
            if (
                test_paths
                and path_filter.missing == set(test_paths)
                and os.environ.get("MOZ_AUTOMATION") == "1"
            ):
                # This can happen in CI when a manifest doesn't exist due to a
                # build config variable in moz.build traversal. Don't generate
                # an error in this case. Adding a todo count avoids mozharness
                # raising an error.
                self.todoCount += len(path_filter.missing)
            else:
                self.log.error(
                    "no tests to run using specified "
                    "combination of filters: {}".format(mp.fmt_filters())
                )
                sys.exit(1)

        if len(self.alltests) == 1 and not verify:
            self.singleFile = os.path.basename(self.alltests[0]["path"])
        else:
            self.singleFile = None

        if self.dump_tests:
            self.dump_tests = os.path.expanduser(self.dump_tests)
            assert os.path.exists(os.path.dirname(self.dump_tests))
            with open(self.dump_tests, "w") as dumpFile:
                dumpFile.write(json.dumps({"active_tests": self.alltests}))

            self.log.info("Dumping active_tests to %s file." % self.dump_tests)
            sys.exit()
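
    # Usage sketch: when self.dump_tests is set (say, to /tmp/active.json, an
    # illustrative path), the resolved test list is written out as JSON and the
    # harness exits before running anything; handy for debugging manifest
    # filters.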

    def setAbsPath(self):
        """
        Set the absolute path for xpcshell, httpdjspath and xrepath. These 3 variables
        depend on input from the command line and we need to allow for absolute paths.
        This function is overloaded for a remote solution as os.path* won't work remotely.
        """
        self.testharnessdir = os.path.dirname(os.path.abspath(__file__))
        self.headJSPath = self.testharnessdir.replace("\\", "/") + "/head.js"
        self.xpcshell = os.path.abspath(self.xpcshell)

        if self.xrePath is None:
            self.xrePath = os.path.dirname(self.xpcshell)
            if mozinfo.isMac:
                # Check if we're run from an OSX app bundle and override
                # self.xrePath if we are.
                appBundlePath = os.path.join(
                    os.path.dirname(os.path.dirname(self.xpcshell)), "Resources"
                )
                if os.path.exists(os.path.join(appBundlePath, "application.ini")):
                    self.xrePath = appBundlePath
        else:
            self.xrePath = os.path.abspath(self.xrePath)

        # httpd.js belongs in xrePath/components, which is Contents/Resources on mac
        self.httpdJSPath = os.path.join(self.xrePath, "components", "httpd.js")
        self.httpdJSPath = self.httpdJSPath.replace("\\", "/")

        if self.mozInfo is None:
            self.mozInfo = os.path.join(self.testharnessdir, "mozinfo.json")

    def buildPrefsFile(self, extraPrefs):
        # Create the prefs.js file

        # In test packages used in CI, the profile_data directory is installed
        # in the SCRIPT_DIR.
        profile_data_dir = os.path.join(SCRIPT_DIR, "profile_data")
        # If possible, read profile data from topsrcdir. This prevents us from
        # requiring a re-build to pick up newly added extensions in the
        # <profile>/extensions directory.
        if build:
            path = os.path.join(build.topsrcdir, "testing", "profiles")
            if os.path.isdir(path):
                profile_data_dir = path
        # Still not found? Look for testing/profiles relative to testing/xpcshell.
        if not os.path.isdir(profile_data_dir):
            path = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "profiles"))
            if os.path.isdir(path):
                profile_data_dir = path

        with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
            base_profiles = json.load(fh)["xpcshell"]

        # values to use when interpolating preferences
        interpolation = {
            "server": "dummyserver",
        }

        profile = Profile(profile=self.tempDir, restore=False)
        for name in base_profiles:
            path = os.path.join(profile_data_dir, name)
            profile.merge(path, interpolation=interpolation)

        # add command line prefs
        prefs = parse_preferences(extraPrefs)
        profile.set_preferences(prefs)

        self.prefsFile = os.path.join(profile.profile, "user.js")
        return prefs
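
    # Sketch of the inputs (values illustrative): extraPrefs arrives as a list
    # of "name=value" strings from the command line; parse_preferences() turns
    # them into a dict that is layered on top of the base profiles merged
    # above, and the combined prefs land in <profile>/user.js.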

    def buildCoreEnvironment(self):
        """
        Add environment variables likely to be used across all platforms, including
        remote systems.
        """
        # Make assertions fatal
        self.env["XPCOM_DEBUG_BREAK"] = "stack-and-abort"
        # Crash reporting interferes with debugging
        if not self.debuggerInfo:
            self.env["MOZ_CRASHREPORTER"] = "1"
        # Don't launch the crash reporter client
        self.env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
        # Don't permit remote connections by default.
        # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
        # enable non-local connections for the purposes of local testing.
        # Don't override the user's choice here. See bug 1049688.
        self.env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1")
        if self.mozInfo.get("topsrcdir") is not None:
            self.env["MOZ_DEVELOPER_REPO_DIR"] = self.mozInfo["topsrcdir"].encode()
        if self.mozInfo.get("topobjdir") is not None:
            self.env["MOZ_DEVELOPER_OBJ_DIR"] = self.mozInfo["topobjdir"].encode()

        # Disable the content process sandbox for the xpcshell tests. They
        # currently attempt to do things like bind() sockets, which is not
        # compatible with the sandbox.
        self.env["MOZ_DISABLE_CONTENT_SANDBOX"] = "1"

        if self.mozInfo.get("socketprocess_networking"):
            self.env["MOZ_FORCE_USE_SOCKET_PROCESS"] = "1"
        else:
            self.env["MOZ_DISABLE_SOCKET_PROCESS"] = "1"

        if self.enable_webrender:
            self.env["MOZ_WEBRENDER"] = "1"
            self.env["MOZ_ACCELERATED"] = "1"
        else:
            self.env["MOZ_WEBRENDER"] = "0"

    def buildEnvironment(self):
        """
        Create and return a dictionary of self.env to include all the appropriate env
        variables and values. On a remote system, we overload this to set different
        values and are missing things like os.environ and PATH.
        """
        self.env = dict(os.environ)
        self.buildCoreEnvironment()
        if sys.platform == "win32":
            self.env["PATH"] = self.env["PATH"] + ";" + self.xrePath
        elif sys.platform in ("os2emx", "os2knix"):
            os.environ["BEGINLIBPATH"] = self.xrePath + ";" + self.env["BEGINLIBPATH"]
            os.environ["LIBPATHSTRICT"] = "T"
        elif sys.platform == "osx" or sys.platform == "darwin":
            self.env["DYLD_LIBRARY_PATH"] = os.path.join(
                os.path.dirname(self.xrePath), "MacOS"
            )
        else:  # unix or linux?
            if "LD_LIBRARY_PATH" not in self.env or self.env["LD_LIBRARY_PATH"] is None:
                self.env["LD_LIBRARY_PATH"] = self.xrePath
            else:
                self.env["LD_LIBRARY_PATH"] = ":".join(
                    [self.xrePath, self.env["LD_LIBRARY_PATH"]]
                )

        usingASan = "asan" in self.mozInfo and self.mozInfo["asan"]
        usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]
        if usingASan or usingTSan:
            # symbolizer support
            llvmsym = os.path.join(
                self.xrePath, "llvm-symbolizer" + self.mozInfo["bin_suffix"]
            )
            if os.path.isfile(llvmsym):
                if usingASan:
                    self.env["ASAN_SYMBOLIZER_PATH"] = llvmsym
                else:
                    oldTSanOptions = self.env.get("TSAN_OPTIONS", "")
                    self.env["TSAN_OPTIONS"] = "external_symbolizer_path={} {}".format(
                        llvmsym, oldTSanOptions
                    )
                self.log.info("runxpcshelltests.py | using symbolizer at %s" % llvmsym)
            else:
                self.log.error(
                    "TEST-UNEXPECTED-FAIL | runxpcshelltests.py | "
                    "Failed to find symbolizer at %s" % llvmsym
                )

        return self.env
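
    # Effect on a TSan build, for reference (paths illustrative): TSAN_OPTIONS
    # ends up as "external_symbolizer_path=<xrePath>/llvm-symbolizer <prior
    # options>", so runtime race reports are symbolized without extra setup.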

    def getPipes(self):
        """
        Determine the value of the stdout and stderr for the test.
        Return value is a tuple (pStdout, pStderr).
        """
        if self.interactive:
            pStdout = None
            pStderr = None
        else:
            if self.debuggerInfo and self.debuggerInfo.interactive:
                pStdout = None
                pStderr = None
            else:
                if sys.platform == "os2emx":
                    pStdout = None
                else:
                    pStdout = PIPE
                pStderr = STDOUT
        return pStdout, pStderr

    def verifyDirPath(self, dirname):
        """
        Simple wrapper to get the absolute path for a given directory name.
        On a remote system, we need to overload this to work on the remote filesystem.
        """
        return os.path.abspath(dirname)

    def trySetupNode(self):
        """
        Run node for HTTP/2 tests, if available, and update the environment as
        appropriate.
        """
        if os.getenv("MOZ_ASSUME_NODE_RUNNING", None):
            self.log.info("Assuming required node servers are already running")
            if not os.getenv("MOZHTTP2_PORT", None):
                self.log.warning(
                    "MOZHTTP2_PORT environment variable not set. "
                    "Tests requiring http/2 will fail."
                )
            return

        # We try to find the node executable in the path given to us by the user in
        # the MOZ_NODE_PATH environment variable
        nodeBin = os.getenv("MOZ_NODE_PATH", None)
        if not nodeBin and build:
            nodeBin = build.substs.get("NODEJS")
        if not nodeBin:
            self.log.warning(
                "MOZ_NODE_PATH environment variable not set. "
                "Tests requiring http/2 will fail."
            )
            return

        if not os.path.exists(nodeBin) or not os.path.isfile(nodeBin):
            error = "node not found at MOZ_NODE_PATH %s" % (nodeBin)
            self.log.error(error)
            raise IOError(error)

        self.log.info("Found node at %s" % (nodeBin,))

        def startServer(name, serverJs):
            if not os.path.exists(serverJs):
                error = "%s not found at %s" % (name, serverJs)
                self.log.error(error)
                raise IOError(error)

            # OK, we found our server, let's try to get it running
            self.log.info("Found %s at %s" % (name, serverJs))
            try:
                # We pipe stdin to node because the server will exit when its
                # stdin reaches EOF
                self.env = ensure_subprocess_env(self.env)
                process = Popen(
                    [nodeBin, serverJs],
                    stdin=PIPE,
                    stdout=PIPE,
                    stderr=PIPE,
                    env=self.env,
                    cwd=os.getcwd(),
                    universal_newlines=True,
                )
                self.nodeProc[name] = process

                # Check to make sure the server starts properly by waiting for it to
                # tell us it's started
                msg = process.stdout.readline()
                if "server listening" in msg:
                    searchObj = re.search(
                        r"HTTP2 server listening on ports ([0-9]+),([0-9]+)", msg, 0
                    )
                    if searchObj:
                        self.env["MOZHTTP2_PORT"] = searchObj.group(1)
                        self.env["MOZNODE_EXEC_PORT"] = searchObj.group(2)
            except OSError as e:
                # This occurs if the subprocess couldn't be started
                self.log.error("Could not run %s server: %s" % (name, str(e)))
                raise

        myDir = os.path.split(os.path.abspath(__file__))[0]
        startServer("moz-http2", os.path.join(myDir, "moz-http2", "moz-http2.js"))

    def shutdownNode(self):
        """
        Shut down our node process, if it exists
        """
        for name, proc in six.iteritems(self.nodeProc):
            self.log.info("Node %s server shutting down ..." % name)
            if proc.poll() is not None:
                self.log.info("Node server %s already dead %s" % (name, proc.poll()))
            else:
                proc.terminate()

            def dumpOutput(fd, label):
                firstTime = True
                for msg in fd:
                    if firstTime:
                        firstTime = False
                        self.log.info("Process %s" % label)
                    self.log.info(msg)

            dumpOutput(proc.stdout, "stdout")
            dumpOutput(proc.stderr, "stderr")
        self.nodeProc = {}

    def startHttp3Server(self):
        """
        Start an HTTP/3 test server.
        """
        binSuffix = ""
        if sys.platform == "win32":
            binSuffix = ".exe"

        http3ServerPath = self.http3server
        if not http3ServerPath:
            http3ServerPath = os.path.join(
                SCRIPT_DIR, "http3server", "http3server" + binSuffix
            )
            if build:
                http3ServerPath = os.path.join(
                    build.topobjdir, "dist", "bin", "http3server" + binSuffix
                )

        if not os.path.exists(http3ServerPath):
            self.log.warning(
                "Http3 server not found at "
                + http3ServerPath
                + ". Tests requiring http/3 will fail."
            )
            return

        # OK, we found our server, let's try to get it running
        self.log.info("Found %s" % (http3ServerPath))
        try:
            dbPath = os.path.join(SCRIPT_DIR, "http3server", "http3serverDB")
            if build:
                dbPath = os.path.join(
                    build.topsrcdir, "netwerk", "test", "http3serverDB"
                )
            self.log.info("Using %s" % (dbPath))
            # We pipe stdin to the server because it will exit when its stdin
            # reaches EOF
            self.env = ensure_subprocess_env(self.env)
            process = Popen(
                [http3ServerPath, dbPath],
                stdin=PIPE,
                stdout=PIPE,
                stderr=PIPE,
                env=self.env,
                cwd=os.getcwd(),
                universal_newlines=True,
            )
            self.http3ServerProc["http3Server"] = process

            # Check to make sure the server starts properly by waiting for it to
            # tell us it's started
            msg = process.stdout.readline()
            if "server listening" in msg:
                searchObj = re.search(
                    r"HTTP3 server listening on ports ([0-9]+), ([0-9]+), ([0-9]+) and ([0-9]+)."
                    " EchConfig is @([\x00-\x7F]+)@",
                    msg,
                    0,
                )
                if searchObj:
                    self.env["MOZHTTP3_PORT"] = searchObj.group(1)
                    self.env["MOZHTTP3_PORT_FAILED"] = searchObj.group(2)
                    self.env["MOZHTTP3_PORT_ECH"] = searchObj.group(3)
                    self.env["MOZHTTP3_PORT_NO_RESPONSE"] = searchObj.group(4)
                    self.env["MOZHTTP3_ECH"] = searchObj.group(5)
        except OSError as e:
            # This occurs if the subprocess couldn't be started
            self.log.error("Could not run the http3 server: %s" % (str(e)))

    def shutdownHttp3Server(self):
        """
        Shut down our http3Server process, if it exists
        """
        for name, proc in six.iteritems(self.http3ServerProc):
            self.log.info("%s server shutting down ..." % name)
            if proc.poll() is not None:
                self.log.info("Http3 server %s already dead %s" % (name, proc.poll()))
            else:
                proc.terminate()
                retries = 0
                while proc.poll() is None:
                    time.sleep(0.1)
                    retries += 1
                    if retries > 40:
                        self.log.info("Killing proc")
                        proc.kill()
                        break

            def dumpOutput(fd, label):
                firstTime = True
                for msg in fd:
                    if firstTime:
                        firstTime = False
                        self.log.info("Process %s" % label)
                    self.log.info(msg)

            dumpOutput(proc.stdout, "stdout")
            dumpOutput(proc.stderr, "stderr")
        self.http3ServerProc = {}

    def buildXpcsRunArgs(self):
        """
        Add arguments to run the test or make it interactive.
        """
        if self.interactive:
            self.xpcsRunArgs = [
                "-e",
                'print("To start the test, type |_execute_test();|.");',
                "-i",
            ]
        else:
            self.xpcsRunArgs = ["-e", "_execute_test(); quit(0);"]

    def addTestResults(self, test):
        self.passCount += test.passCount
        self.failCount += test.failCount
        self.todoCount += test.todoCount

    def updateMozinfo(self, prefs, options):
        # Handle filenames in mozInfo
        if not isinstance(self.mozInfo, dict):
            mozInfoFile = self.mozInfo
            if not os.path.isfile(mozInfoFile):
                self.log.error(
                    "Error: couldn't find mozinfo.json at '%s'. Perhaps you "
                    "need to use --build-info-json?" % mozInfoFile
                )
                return False
            self.mozInfo = json.load(open(mozInfoFile))

        # mozinfo.info is used as kwargs. Some builds are done with
        # an older Python that can't handle Unicode keys in kwargs.
        # All of the keys in question should be ASCII.
        fixedInfo = {}
        for k, v in self.mozInfo.items():
            if isinstance(k, bytes):
                k = k.decode("utf-8")
            fixedInfo[k] = v
        self.mozInfo = fixedInfo

        self.mozInfo["fission"] = prefs.get("fission.autostart", False)

        self.mozInfo["serviceworker_e10s"] = True

        self.mozInfo["verify"] = options.get("verify", False)
        self.mozInfo["webrender"] = self.enable_webrender

        self.mozInfo["socketprocess_networking"] = prefs.get(
            "network.http.network_access_on_socket_process.enabled", False
        )

        mozinfo.update(self.mozInfo)

        return True
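
    # The keys injected above (fission, serviceworker_e10s, verify, webrender,
    # socketprocess_networking) become available to manifest conditions, e.g.
    # a hypothetical annotation "skip-if = socketprocess_networking".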

    def runSelfTest(self):
        import selftest
        import unittest

        this = self

        class XPCShellTestsTests(selftest.XPCShellTestsTests):
            def __init__(self, name):
                unittest.TestCase.__init__(self, name)
                self.testing_modules = this.testingModulesDir
                self.xpcshellBin = this.xpcshell
                self.utility_path = this.utility_path
                self.symbols_path = this.symbolsPath

        old_info = dict(mozinfo.info)
        try:
            suite = unittest.TestLoader().loadTestsFromTestCase(XPCShellTestsTests)
            return unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
        finally:
            # The self tests modify mozinfo, so we need to reset it.
            mozinfo.info.clear()
            mozinfo.update(old_info)

    def runTests(self, options, testClass=XPCShellTestThread, mobileArgs=None):
        """
        Run xpcshell tests.
        """

        global gotSIGINT

        # Number of times to repeat test(s) in --verify mode
        VERIFY_REPEAT = 10

        if isinstance(options, Namespace):
            options = vars(options)

        # Try to guess modules directory.
        # This somewhat grotesque hack allows the buildbot machines to find the
        # modules directory without having to configure the buildbot hosts. This
        # code path should never be executed in local runs because the build system
        # should always set this argument.
        if not options.get("testingModulesDir"):
            possible = os.path.join(here, os.path.pardir, "modules")

            if os.path.isdir(possible):
                testingModulesDir = possible

        if options.get("rerun_failures"):
            if os.path.exists(options.get("failure_manifest")):
                rerun_manifest = os.path.join(
                    os.path.dirname(options["failure_manifest"]), "rerun.ini"
                )
                shutil.copyfile(options["failure_manifest"], rerun_manifest)
                os.remove(options["failure_manifest"])
            else:
                self.log.error("No failures were found to re-run.")
                sys.exit(1)

        if options.get("testingModulesDir"):
            # The resource loader expects native paths. Depending on how we were
            # invoked, a UNIX style path may sneak in on Windows. We try to
            # normalize that.
            testingModulesDir = os.path.normpath(options["testingModulesDir"])

            if not os.path.isabs(testingModulesDir):
                testingModulesDir = os.path.abspath(testingModulesDir)

            if not testingModulesDir.endswith(os.path.sep):
                testingModulesDir += os.path.sep

        self.debuggerInfo = None

        if options.get("debugger"):
            self.debuggerInfo = mozdebug.get_debugger_info(
                options.get("debugger"),
                options.get("debuggerArgs"),
                options.get("debuggerInteractive"),
            )

        self.jsDebuggerInfo = None
        if options.get("jsDebugger"):
            # A namedtuple lets us keep .port instead of ['port']
            JSDebuggerInfo = namedtuple("JSDebuggerInfo", ["port"])
            self.jsDebuggerInfo = JSDebuggerInfo(port=options["jsDebuggerPort"])

        self.xpcshell = options.get("xpcshell")
        self.http3server = options.get("http3server")
        self.xrePath = options.get("xrePath")
        self.utility_path = options.get("utility_path")
        self.appPath = options.get("appPath")
        self.symbolsPath = options.get("symbolsPath")
        self.tempDir = os.path.normpath(options.get("tempDir") or tempfile.gettempdir())
        self.manifest = options.get("manifest")
        self.dump_tests = options.get("dump_tests")
        self.interactive = options.get("interactive")
        self.verbose = options.get("verbose")
        self.verboseIfFails = options.get("verboseIfFails")
        self.keepGoing = options.get("keepGoing")
        self.logfiles = options.get("logfiles")
        self.totalChunks = options.get("totalChunks", 1)
        self.thisChunk = options.get("thisChunk")
        self.profileName = options.get("profileName") or "xpcshell"
        self.mozInfo = options.get("mozInfo")
        self.testingModulesDir = testingModulesDir
        self.pluginsPath = options.get("pluginsPath")
        self.sequential = options.get("sequential")
        self.failure_manifest = options.get("failure_manifest")
        self.threadCount = options.get("threadCount") or NUM_THREADS
        self.jscovdir = options.get("jscovdir")
        self.enable_webrender = options.get("enable_webrender")
        self.headless = options.get("headless")
        self.runFailures = options.get("runFailures")
        self.timeoutAsPass = options.get("timeoutAsPass")
        self.crashAsPass = options.get("crashAsPass")

        self.testCount = 0
        self.passCount = 0
        self.failCount = 0
        self.todoCount = 0

        self.setAbsPath()
        prefs = self.buildPrefsFile(options.get("extraPrefs") or [])
        self.buildXpcsRunArgs()

        self.event = Event()

        if not self.updateMozinfo(prefs, options):
            return False

        if options.get("self_test"):
            if not self.runSelfTest():
                return False

        if (
            "tsan" in self.mozInfo
            and self.mozInfo["tsan"]
            and not options.get("threadCount")
        ):
            # TSan requires significantly more memory, so reduce the amount of parallel
            # tests we run to avoid OOMs and timeouts.
            # Use floor division so threadCount stays an integer under Python 3
            # (the old "/" here was flagged by pylint --py3k W1619).
            self.threadCount = self.threadCount // 2

        self.stack_fixer_function = None
        if self.utility_path and os.path.exists(self.utility_path):
            self.stack_fixer_function = get_stack_fixer_function(
                self.utility_path, self.symbolsPath
            )

        # buildEnvironment() needs mozInfo, so we call it after mozInfo is initialized.
        self.buildEnvironment()

        # The appDirKey is an optional entry in either the default or individual test
        # sections that defines a relative application directory for test runs. If
        # defined we pass 'grePath/$appDirKey' for the -a parameter of the xpcshell
        # test harness.
        appDirKey = None
        if "appname" in self.mozInfo:
            appDirKey = self.mozInfo["appname"] + "-appdir"

        # We have to do this before we run tests that depend on having the node
        # http/2 server.
        self.trySetupNode()

        self.startHttp3Server()

        pStdout, pStderr = self.getPipes()

        self.buildTestList(
            options.get("test_tags"), options.get("testPaths"), options.get("verify")
        )
        if self.singleFile:
            self.sequential = True

        if options.get("shuffle"):
            random.shuffle(self.alltests)

        self.cleanup_dir_list = []

        kwargs = {
            "appPath": self.appPath,
            "xrePath": self.xrePath,
            "utility_path": self.utility_path,
            "testingModulesDir": self.testingModulesDir,
            "debuggerInfo": self.debuggerInfo,
            "jsDebuggerInfo": self.jsDebuggerInfo,
            "pluginsPath": self.pluginsPath,
            "httpdJSPath": self.httpdJSPath,
            "headJSPath": self.headJSPath,
            "tempDir": self.tempDir,
            "testharnessdir": self.testharnessdir,
            "profileName": self.profileName,
            "singleFile": self.singleFile,
            "env": self.env,  # making a copy of this in the testthreads
            "symbolsPath": self.symbolsPath,
            "logfiles": self.logfiles,
            "xpcshell": self.xpcshell,
            "xpcsRunArgs": self.xpcsRunArgs,
            "failureManifest": self.failure_manifest,
            "jscovdir": self.jscovdir,
            "harness_timeout": self.harness_timeout,
            "stack_fixer_function": self.stack_fixer_function,
            "event": self.event,
            "cleanup_dir_list": self.cleanup_dir_list,
            "pStdout": pStdout,
            "pStderr": pStderr,
            "keep_going": self.keepGoing,
            "log": self.log,
            "interactive": self.interactive,
            "app_dir_key": appDirKey,
            "rootPrefsFile": self.prefsFile,
            "extraPrefs": options.get("extraPrefs") or [],
            "verboseIfFails": self.verboseIfFails,
            "headless": self.headless,
            "runFailures": self.runFailures,
            "timeoutAsPass": self.timeoutAsPass,
            "crashAsPass": self.crashAsPass,
        }

        if self.sequential:
            # Allow user to kill hung xpcshell subprocess with SIGINT
            # when we are only running tests sequentially.
            signal.signal(signal.SIGINT, markGotSIGINT)

        if self.debuggerInfo:
            # Force a sequential run
            self.sequential = True

            # If we have an interactive debugger, disable SIGINT entirely.
            if self.debuggerInfo.interactive:
                signal.signal(signal.SIGINT, lambda signum, frame: None)

            if "lldb" in self.debuggerInfo.path:
                # Ask people to start debugging using 'process launch', see bug 952211.
                self.log.info(
                    "It appears that you're using LLDB to debug this test. "
                    + "Please use the 'process launch' command instead of "
                    "the 'run' command to start xpcshell."
                )

        if self.jsDebuggerInfo:
            # The js debugger magic needs more work to do the right thing
            # if debugging multiple files.
            if len(self.alltests) != 1:
                self.log.error(
                    "Error: --jsdebugger can only be used with a single test!"
                )
                return False

        # The test itself needs to know whether it is a tsan build, since
        # that has an effect on interpretation of the process return value.
        usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]

        # create a queue of all tests that will run
        tests_queue = deque()
        # also a list for the tests that need to be run sequentially
        sequential_tests = []
        status = None
        if not options.get("verify"):
            for test_object in self.alltests:
                # Test identifiers are provided for the convenience of logging. These
                # start as path names but are rewritten in case tests from the same path
                # are re-run.

                path = test_object["path"]

                if self.singleFile and not path.endswith(self.singleFile):
                    continue

                self.testCount += 1

                test = testClass(
                    test_object,
                    verbose=self.verbose or test_object.get("verbose") == "true",
                    usingTSan=usingTSan,
                    mobileArgs=mobileArgs,
                    **kwargs
                )
                if "run-sequentially" in test_object or self.sequential:
                    sequential_tests.append(test)
                else:
                    tests_queue.append(test)

            status = self.runTestList(
                tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
            )
        else:
            #
            # Test verification: Run each test many times, in various configurations,
            # in hopes of finding intermittent failures.
            #

            def step1():
                # Run tests sequentially. Parallel mode would also work, except that
                # the logging system gets confused when 2 or more tests with the same
                # name run at the same time.
                sequential_tests = []
                for i in range(VERIFY_REPEAT):
                    self.testCount += 1
                    test = testClass(
                        test_object, retry=False, mobileArgs=mobileArgs, **kwargs
                    )
                    sequential_tests.append(test)
                status = self.runTestList(
                    tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
                )
                return status

            def step2():
                # Run tests sequentially, with MOZ_CHAOSMODE enabled.
                sequential_tests = []
                self.env["MOZ_CHAOSMODE"] = "0xfb"
                for i in range(VERIFY_REPEAT):
                    self.testCount += 1
                    test = testClass(
                        test_object, retry=False, mobileArgs=mobileArgs, **kwargs
                    )
                    sequential_tests.append(test)
                status = self.runTestList(
                    tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
                )
                return status

            steps = [
                ("1. Run each test %d times, sequentially." % VERIFY_REPEAT, step1),
                (
                    "2. Run each test %d times, sequentially, in chaos mode."
                    % VERIFY_REPEAT,
                    step2,
                ),
            ]
            startTime = datetime.now()
            maxTime = timedelta(seconds=options["verifyMaxTime"])
            for test_object in self.alltests:
                stepResults = {}
                for (descr, step) in steps:
                    stepResults[descr] = "not run / incomplete"
                finalResult = "PASSED"
                for (descr, step) in steps:
                    if (datetime.now() - startTime) > maxTime:
                        self.log.info(
                            "::: Test verification is taking too long: Giving up!"
                        )
                        self.log.info(
                            "::: So far, all checks passed, but not "
                            "all checks were run."
                        )
                        break
                    self.log.info(":::")
                    self.log.info('::: Running test verification step "%s"...' % descr)
                    self.log.info(":::")
                    status = step()
                    if status is not True:
                        stepResults[descr] = "FAIL"
                        finalResult = "FAILED!"
                        break
                    stepResults[descr] = "Pass"
                self.log.info(":::")
                self.log.info(
                    "::: Test verification summary for: %s" % test_object["path"]
                )
                self.log.info(":::")
                for descr in sorted(stepResults.keys()):
                    self.log.info("::: %s : %s" % (descr, stepResults[descr]))
                self.log.info(":::")
                self.log.info("::: Test verification %s" % finalResult)
                self.log.info(":::")

        self.shutdownNode()
        self.shutdownHttp3Server()

        return status

    def start_test(self, test):
        test.start()

    def test_ended(self, test):
        pass
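
    # These two hooks are deliberately trivial; subclasses (remote harnesses,
    # presumably) can override them to bracket each test thread's start and
    # completion.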

    def runTestList(
        self, tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
    ):

        if self.sequential:
            self.log.info("Running tests sequentially.")
        else:
            self.log.info("Using at most %d threads." % self.threadCount)

        # keep a set of threadCount running tests and start running the
        # tests in the queue at most threadCount at a time
        running_tests = set()
        keep_going = True
        exceptions = []
        tracebacks = []
        self.try_again_list = []

        tests_by_manifest = defaultdict(list)
        for test in self.alltests:
            group = test["manifest"]
            if "ancestor_manifest" in test:
                ancestor_manifest = normsep(test["ancestor_manifest"])
                # Only change the group id if ancestor is not the generated root manifest.
                if "/" in ancestor_manifest:
                    group = "{}:{}".format(ancestor_manifest, group)
            tests_by_manifest[group].append(test["id"])

        self.log.suite_start(tests_by_manifest, name="xpcshell")

        while tests_queue or running_tests:
            # if we're not supposed to continue and all of the running tests
            # are done, stop
            if not keep_going and not running_tests:
                break

            # if there's room to run more tests, start running them
            while (
                keep_going and tests_queue and (len(running_tests) < self.threadCount)
            ):
                test = tests_queue.popleft()
                running_tests.add(test)
                self.start_test(test)

            # queue is full (for now) or no more new tests,
            # process the finished tests so far

            # wait for at least one of the tests to finish
            self.event.wait(1)
            self.event.clear()

            # find what tests are done (might be more than 1)
            done_tests = set()
            for test in running_tests:
                if test.done:
                    self.test_ended(test)
                    done_tests.add(test)
                    test.join(
                        1
                    )  # join with timeout so we don't hang on blocked threads
                    # if the test had trouble, we will try running it again
                    # at the end of the run
                    if test.retry or test.is_alive():
                        # if the join call timed out, test.is_alive => True
                        self.try_again_list.append(test.test_object)
                        continue
                    # did the test encounter any exception?
                    if test.exception:
                        exceptions.append(test.exception)
                        tracebacks.append(test.traceback)
                        # we won't add any more tests, will just wait for
                        # the currently running ones to finish
                        keep_going = False
                    keep_going = keep_going and test.keep_going
                    self.addTestResults(test)

            # make room for new tests to run
            running_tests.difference_update(done_tests)
|
|
|
|
|
|
|
if keep_going:
|
|
|
|
# run the other tests sequentially
|
|
|
|
for test in sequential_tests:
|
|
|
|
if not keep_going:
|
2017-09-14 16:51:27 +03:00
|
|
|
self.log.error(
|
|
|
|
"TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so "
|
|
|
|
"stopped run. (Use --keep-going to keep running tests "
|
|
|
|
"after killing one with SIGINT)"
|
|
|
|
)
|
2013-07-20 06:27:14 +04:00
|
|
|
break
|
2013-08-28 22:28:39 +04:00
|
|
|
# we don't want to retry these tests
|
|
|
|
test.retry = False
|
2021-03-25 00:49:39 +03:00
|
|
|
self.start_test(test)
|
2013-07-20 06:27:14 +04:00
|
|
|
test.join()
|
2021-03-25 00:49:39 +03:00
|
|
|
self.test_ended(test)
|
2013-08-17 00:42:04 +04:00
|
|
|
self.addTestResults(test)
|
2013-07-20 06:27:14 +04:00
|
|
|
# did the test encounter any exception?
|
|
|
|
if test.exception:
|
2013-08-17 00:42:04 +04:00
|
|
|
exceptions.append(test.exception)
|
|
|
|
tracebacks.append(test.traceback)
|
|
|
|
break
|
2013-07-20 06:27:14 +04:00
|
|
|
keep_going = test.keep_going
|
|
|
|
|
2013-08-28 22:28:39 +04:00
|
|
|
# retry tests that failed when run in parallel
|
|
|
|
if self.try_again_list:
|
|
|
|
self.log.info("Retrying tests that failed when run in parallel.")
|
|
|
|
for test_object in self.try_again_list:
|
2017-08-15 18:06:16 +03:00
|
|
|
test = testClass(
|
|
|
|
test_object,
|
2017-09-14 16:51:27 +03:00
|
|
|
retry=False,
|
|
|
|
verbose=self.verbose,
|
|
|
|
mobileArgs=mobileArgs,
|
|
|
|
**kwargs
|
|
|
|
)
|
2021-03-25 00:49:39 +03:00
|
|
|
self.start_test(test)
|
2013-08-28 22:28:39 +04:00
|
|
|
test.join()
|
2021-03-25 00:49:39 +03:00
|
|
|
self.test_ended(test)
|
2013-08-28 22:28:39 +04:00
|
|
|
self.addTestResults(test)
|
|
|
|
# did the test encounter any exception?
|
|
|
|
if test.exception:
|
|
|
|
exceptions.append(test.exception)
|
|
|
|
tracebacks.append(test.traceback)
|
|
|
|
break
|
|
|
|
keep_going = test.keep_going
|
|
|
|
|
2013-07-20 06:27:14 +04:00
|
|
|
# restore default SIGINT behaviour
|
|
|
|
signal.signal(signal.SIGINT, signal.SIG_DFL)

        # Clean up any leftover directories that might be lying around.
        # Some might fail because of Windows taking too long to unlock them.
        # We don't do anything if this fails because the test machines will
        # have their $TEMP dirs cleaned up on reboot anyway.
        for directory in self.cleanup_dir_list:
            try:
                shutil.rmtree(directory)
            except Exception:
                self.log.info("%s could not be cleaned up." % directory)

        if exceptions:
            self.log.info("The following exceptions were raised:")
            for t in tracebacks:
                self.log.error(t)
            raise exceptions[0]

        if self.testCount == 0 and os.environ.get("MOZ_AUTOMATION") != "1":
            self.log.error("No tests run. Did you pass an invalid --test-path?")
            self.failCount = 1
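            # The MOZ_AUTOMATION guard is presumably here because chunked CI
            # runs can legitimately end up selecting zero tests.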

        # Swapping the counts lets runs that expect failures pass the
        # mozharness parsers, which report an orange job for failCount > 0.
        if self.runFailures:
            passed = self.passCount
            self.passCount = self.failCount
            self.failCount = passed
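        # For example, a run that expects failures and records three of them
        # with no passes will now report Passed: 3, Failed: 0.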

        self.log.info("INFO | Result summary:")
        self.log.info("INFO | Passed: %d" % self.passCount)
        self.log.info("INFO | Failed: %d" % self.failCount)
        self.log.info("INFO | Todo: %d" % self.todoCount)
        self.log.info("INFO | Retried: %d" % len(self.try_again_list))

        if gotSIGINT and not keep_going:
            self.log.error(
                "TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so stopped run. "
                "(Use --keep-going to keep running tests after "
                "killing one with SIGINT)"
            )
            return False

        self.log.suite_end()
        return self.runFailures or self.failCount == 0


def main():
    parser = parser_desktop()
    options = parser.parse_args()

    log = commandline.setup_logging("XPCShell", options, {"tbpl": sys.stdout})
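    # mozlog's setup_logging returns a structured logger; the {"tbpl":
    # sys.stdout} default sends TBPL-formatted output to stdout unless
    # --log-* options on the command line override it.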

    if options.xpcshell is None:
        log.error("Must provide path to xpcshell using --xpcshell")
        sys.exit(1)

    xpcsh = XPCShellTests(log)

    if options.interactive and not options.testPath:
        log.error("Error: You must specify a test filename in interactive mode!")
        sys.exit(1)

    if not xpcsh.runTests(options):
        sys.exit(1)


if __name__ == "__main__":
    main()
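
# Example invocation (the path is illustrative; run with --help or see
# parser_desktop() in xpcshellcommandline.py for the full option set):
#
#   python runxpcshelltests.py --xpcshell /path/to/objdir/dist/bin/xpcshell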