Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1429463 - Prototype ./mach try coverage tool. r=ahal,marco
--HG-- rename : tools/tryselect/test/test_fuzzy.py => tools/tryselect/test/test_tasks.py
This commit is contained in:
Parent
d850d799a0
Commit
3546d45bf6
|
@ -235,6 +235,16 @@ class TrySelect(MachCommandBase):
|
||||||
at = AutoTry(self.topsrcdir, self._mach_context)
|
at = AutoTry(self.topsrcdir, self._mach_context)
|
||||||
return at.run(**kwargs)
|
return at.run(**kwargs)
|
||||||
|
|
||||||
|
@SubCommand('try',
            'coverage',
            description='Select tasks on try using coverage data',
            parser=get_parser('coverage'))
def try_coverage(self, **kwargs):
    """Select which tasks to use using coverage data.
    """
    # Imported lazily so |mach try| startup does not pay for the coverage
    # selector's module-level manifest scan unless this subcommand is used.
    from tryselect.selectors.coverage import run_coverage_try
    return run_coverage_try(**kwargs)
|
||||||
|
|
||||||
@SubCommand('try',
|
@SubCommand('try',
|
||||||
'release',
|
'release',
|
||||||
description='Push the current tree to try, configured for a staging release.',
|
description='Push the current tree to try, configured for a staging release.',
|
||||||
|
|
|
@ -0,0 +1,367 @@
|
||||||
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
from __future__ import absolute_import, print_function, unicode_literals
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import json
|
||||||
|
import hashlib
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import sqlite3
|
||||||
|
import subprocess
|
||||||
|
import requests
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
|
||||||
|
from mozboot.util import get_state_dir
|
||||||
|
from mozbuild.base import MozbuildObject
|
||||||
|
from mozpack.files import FileFinder
|
||||||
|
from moztest.resolve import TestResolver
|
||||||
|
from mozversioncontrol import get_repository_object
|
||||||
|
|
||||||
|
from ..cli import BaseTryParser
|
||||||
|
from ..tasks import generate_tasks, filter_tasks_by_paths
|
||||||
|
from ..push import push_to_try
|
||||||
|
|
||||||
|
# Repository and build context, resolved once at import time.
here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)
vcs = get_repository_object(build.topsrcdir)

# Per-checkout cache location: keyed on a hash of the absolute srcdir path so
# multiple checkouts on one machine do not share chunk-mapping downloads.
# NOTE(review): hashlib.sha256() of a str works on Python 2 only; Python 3
# would need .encode() — confirm intended interpreter.
root_hash = hashlib.sha256(os.path.abspath(build.topsrcdir)).hexdigest()
cache_dir = os.path.join(get_state_dir()[0], 'cache', root_hash, 'chunk_mapping')
if not os.path.isdir(cache_dir):
    os.makedirs(cache_dir)
# Downloaded chunk-mapping database and the JSON tag describing its revision.
CHUNK_MAPPING_FILE = os.path.join(cache_dir, 'chunk_mapping.sqlite')
CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, 'chunk_mapping_tag.json')
|
||||||
|
|
||||||
|
# Maps from platform names in the chunk_mapping sqlite database to respective
|
||||||
|
# substrings in task names.
|
||||||
|
PLATFORM_MAP = {
|
||||||
|
'linux': 'linux64/opt',
|
||||||
|
'windows': 'windows10-64/opt',
|
||||||
|
}
|
||||||
|
|
||||||
|
# List of platform/build type combinations that are included in pushes by |mach try coverage|.
|
||||||
|
OPT_TASK_PATTERNS = [
|
||||||
|
'macosx64/opt',
|
||||||
|
'windows10-64/opt',
|
||||||
|
'windows7-32/opt',
|
||||||
|
'android-em-4.3-arm7-api-16/opt',
|
||||||
|
'linux64/opt',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class CoverageParser(BaseTryParser):
    # Argument parser for |mach try coverage|. Adds no arguments of its own;
    # it reuses the shared push/task argument groups and task templates
    # provided by BaseTryParser.
    name = 'coverage'
    arguments = []
    common_groups = ['push', 'task']
    templates = ['artifact', 'env', 'rebuild', 'chemspill-prio']
|
||||||
|
|
||||||
|
|
||||||
|
def read_test_manifests():
    '''Uses TestResolver to read all test manifests in the tree.

    Returns a (tests, support_files_map) tuple that describes the tests in the tree:
    tests - a set of test file paths
    support_files_map - a dict that maps from each support file to a list of
                        test files that require it
    '''
    test_resolver = TestResolver.from_environment(cwd=here)
    file_finder = FileFinder(build.topsrcdir)
    support_files_map = collections.defaultdict(list)
    tests = set()

    for test in test_resolver.resolve_tests(build.topsrcdir):
        tests.add(test['file_relpath'])
        if 'support-files' not in test:
            continue

        # 'support-files' is a whitespace-separated list of path patterns.
        for support_file_pattern in test['support-files'].split():
            # Get the pattern relative to topsrcdir.
            if support_file_pattern.startswith('!/'):
                support_file_pattern = support_file_pattern[2:]
            elif support_file_pattern.startswith('/'):
                support_file_pattern = support_file_pattern[1:]
            else:
                support_file_pattern = os.path.normpath(os.path.join(test['dir_relpath'],
                                                                     support_file_pattern))

            # If it doesn't have a glob, then it's a single file.
            if '*' not in support_file_pattern:
                # Simple case: single support file, just add it here.
                support_files_map[support_file_pattern].append(test['file_relpath'])
                continue

            # Glob pattern: expand it against the tree and record every match.
            for support_file, _ in file_finder.find(support_file_pattern):
                support_files_map[support_file].append(test['file_relpath'])

    return tests, support_files_map
|
||||||
|
|
||||||
|
|
||||||
|
# TODO cache the output of this function
# Scanning every manifest in the tree is slow; it is done once at import time
# so the resulting maps can be used as module-level globals below.
all_tests, all_support_files = read_test_manifests()
|
||||||
|
|
||||||
|
|
||||||
|
def download_coverage_mapping(base_revision):
    '''Download the chunk-mapping database built for *base_revision*.

    Reuses the cached database when the tag file says it was downloaded for
    the same base revision. Otherwise walks recent mozilla-central pushes
    (newest first, within PUSH_HISTORY_DAYS) looking for one with a published
    chunk_mapping artifact, downloads and extracts it, and writes a new tag.

    Raises:
        Exception: if no recent push has a chunk-mapping artifact.
    '''
    try:
        with open(CHUNK_MAPPING_TAG_FILE, 'r') as f:
            tags = json.load(f)
            if tags['target_revision'] == base_revision:
                # Cache hit: mapping already downloaded for this revision.
                return
            else:
                print('Base revision changed.')
    except (IOError, ValueError):
        # Missing or corrupt tag file: fall through and (re)download.
        print('Chunk mapping file not found.')

    CHUNK_MAPPING_URL_TEMPLATE = 'https://index.taskcluster.net/v1/task/project.releng.services.project.production.shipit_code_coverage.{}/artifacts/public/chunk_mapping.tar.xz'  # noqa
    JSON_PUSHES_URL_TEMPLATE = 'https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&tochange={}&startdate={}'  # noqa

    # Get pushes from at most one month ago.
    PUSH_HISTORY_DAYS = 30
    delta = datetime.timedelta(days=PUSH_HISTORY_DAYS)
    start_time = (datetime.datetime.now() - delta).strftime('%Y-%m-%d')
    pushes_url = JSON_PUSHES_URL_TEMPLATE.format(base_revision, start_time)
    pushes = requests.get(pushes_url).json()['pushes']

    print('Looking for coverage data. This might take a minute or two.')
    print('Base revision:', base_revision)
    for push_id in sorted(pushes.keys())[::-1]:
        rev = pushes[push_id]['changesets'][0]
        url = CHUNK_MAPPING_URL_TEMPLATE.format(rev)
        print('push id: {},\trevision: {}'.format(push_id, rev))

        # HEAD request first: cheaply check whether the artifact exists
        # before streaming the (large) archive.
        r = requests.head(url)
        if not r.ok:
            continue

        print('Chunk mapping found, downloading...')
        r = requests.get(url, stream=True)

        CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, 'chunk_mapping.tar.xz')
        with open(CHUNK_MAPPING_ARCHIVE, 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)

        subprocess.check_call(['tar', '-xJf', CHUNK_MAPPING_ARCHIVE,
                               '-C', os.path.dirname(CHUNK_MAPPING_FILE)])
        os.remove(CHUNK_MAPPING_ARCHIVE)
        assert os.path.isfile(CHUNK_MAPPING_FILE)
        # Tag the cache so the next invocation can detect staleness.
        with open(CHUNK_MAPPING_TAG_FILE, 'w') as f:
            json.dump({'target_revision': base_revision,
                       'chunk_mapping_revision': rev,
                       'download_date': start_time},
                      f)
        return
    raise Exception('Could not find suitable coverage data.')
|
||||||
|
|
||||||
|
|
||||||
|
def is_a_test(cursor, path):
    '''Checks the all_tests global and the chunk mapping database to see if a
    given file is a test file.
    '''
    # Fast path: the path was already discovered by the manifest scan.
    if path in all_tests:
        return True

    # Otherwise consult the two chunk-mapping tables that list tests.
    queries = (
        'SELECT COUNT(*) from chunk_to_test WHERE path=?',
        'SELECT COUNT(*) from file_to_test WHERE test=?',
    )
    for query in queries:
        cursor.execute(query, (path,))
        if cursor.fetchone()[0]:
            return True

    return False
|
||||||
|
|
||||||
|
|
||||||
|
def tests_covering_file(cursor, path):
|
||||||
|
'''Returns a set of tests that cover a given source file.
|
||||||
|
'''
|
||||||
|
cursor.execute('SELECT test FROM file_to_test WHERE source=?', (path,))
|
||||||
|
return set(e[0] for e in cursor.fetchall())
|
||||||
|
|
||||||
|
|
||||||
|
def tests_in_chunk(cursor, platform, suite):
|
||||||
|
'''Returns a set of tests that are contained in a given chunk.
|
||||||
|
'''
|
||||||
|
cursor.execute('SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?',
|
||||||
|
(platform, suite))
|
||||||
|
# Because of bug 1480103, some entries in this table contain both a file name and a test name,
|
||||||
|
# separated by a space. With the split, only the file name is kept.
|
||||||
|
return set(e[0].split(' ')[0] for e in cursor.fetchall())
|
||||||
|
|
||||||
|
|
||||||
|
def chunks_covering_file(cursor, path):
    '''Returns a set of (platform, suite) tuples with the chunks that cover a given source file.
    '''
    query = 'SELECT platform, chunk FROM file_to_chunk WHERE path=?'
    cursor.execute(query, (path,))
    return {(platform, chunk) for platform, chunk in cursor.fetchall()}
|
||||||
|
|
||||||
|
|
||||||
|
def tests_supported_by_file(path):
    '''Returns a set of tests that are using the given file as a support-file.
    '''
    # all_support_files is a defaultdict(list): indexing it with a missing
    # path would insert an empty entry as a side effect of the lookup,
    # growing the global map. Use .get() to keep the lookup read-only.
    return set(all_support_files.get(path, ()))
|
||||||
|
|
||||||
|
|
||||||
|
def find_tests(changed_files):
    '''Finds both individual tests and test chunks that should be run to test code changes.
    Argument: a list of file paths relative to the source checkout.

    Returns: a (test_files, test_chunks) tuple with two sets.
    test_files - contains tests that should be run to verify changes to changed_files.
    test_chunks - contains (platform, suite) tuples with chunks that should be
    run. These chunks do not support running a subset of the tests (like
    cppunit or gtest), so the whole chunk must be run.
    '''
    test_files = set()
    test_chunks = set()
    files_no_coverage = set()

    with sqlite3.connect(CHUNK_MAPPING_FILE) as conn:
        c = conn.cursor()
        for path in changed_files:
            # If path is a test, add it to the list and continue.
            if is_a_test(c, path):
                test_files.add(path)
                continue

            # Look at the chunk mapping and add all tests that cover this file.
            tests = tests_covering_file(c, path)
            chunks = chunks_covering_file(c, path)
            # If we found tests covering this, then it's not a support-file, so
            # save these and continue.
            if tests or chunks:
                test_files |= tests
                test_chunks |= chunks
                continue

            # Check if the path is a support-file for any test, by querying test manifests.
            tests = tests_supported_by_file(path)
            if tests:
                test_files |= tests
                continue

            # There is no coverage information for this file.
            files_no_coverage.add(path)

        files_covered = set(changed_files) - files_no_coverage
        # Normalize Windows-style separators coming from the database.
        test_files = set(s.replace('\\', '/') for s in test_files)

        _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks)

        remaining_test_chunks = set()
        # For all test_chunks, try to find the tests contained by them in the
        # chunk_to_test mapping.
        for platform, suite in test_chunks:
            tests = tests_in_chunk(c, platform, suite)
            if tests:
                for test in tests:
                    test_files.add(test.replace('\\', '/'))
            else:
                # No per-test breakdown available: the whole chunk must run.
                remaining_test_chunks.add((platform, suite))

    return test_files, remaining_test_chunks
|
||||||
|
|
||||||
|
|
||||||
|
def _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks):
|
||||||
|
'''Print a summary of what will be run to the user's terminal.
|
||||||
|
'''
|
||||||
|
files_covered = sorted(files_covered)
|
||||||
|
files_no_coverage = sorted(files_no_coverage)
|
||||||
|
test_files = sorted(test_files)
|
||||||
|
test_chunks = sorted(test_chunks)
|
||||||
|
|
||||||
|
if files_covered:
|
||||||
|
print('Found {} modified source files with test coverage:'.format(len(files_covered)))
|
||||||
|
for covered in files_covered:
|
||||||
|
print('\t', covered)
|
||||||
|
|
||||||
|
if files_no_coverage:
|
||||||
|
print('Found {} modified source files with no coverage:'.format(len(files_no_coverage)))
|
||||||
|
for f in files_no_coverage:
|
||||||
|
print('\t', f)
|
||||||
|
|
||||||
|
if not files_covered:
|
||||||
|
print('No modified source files are covered by tests.')
|
||||||
|
elif not files_no_coverage:
|
||||||
|
print('All modified source files are covered by tests.')
|
||||||
|
|
||||||
|
if test_files:
|
||||||
|
print('Running {} individual test files.'.format(len(test_files)))
|
||||||
|
else:
|
||||||
|
print('Could not find any individual tests to run.')
|
||||||
|
|
||||||
|
if test_chunks:
|
||||||
|
print('Running {} test chunks.'.format(len(test_chunks)))
|
||||||
|
for platform, chunk in test_chunks:
|
||||||
|
print('\t', platform, chunk)
|
||||||
|
else:
|
||||||
|
print('Could not find any test chunks to run.')
|
||||||
|
|
||||||
|
|
||||||
|
def filter_tasks_by_chunks(tasks, chunks):
    '''Find all tasks that will run the given chunks.
    '''
    selected_tasks = set()
    for platform, suite in chunks:
        # Translate database platform names into task-name substrings.
        platform = PLATFORM_MAP.get(platform, platform)
        # Suite names taken from the chunk mapping are not consistent with the
        # task names, so we're using string inclusion to find the tests.
        matching = [task for task in tasks if platform in task and suite in task]
        if matching:
            selected_tasks.update(matching)
        else:
            print('Warning: no task found for chunk', platform, suite)

    return list(selected_tasks)
|
||||||
|
|
||||||
|
|
||||||
|
def is_opt_task(task):
    '''True if the task runs on a supported platform and build type combination.
    This is used to remove -ccov/asan/pgo tasks, along with all /debug tasks.
    '''
    for pattern in OPT_TASK_PATTERNS:
        if pattern in task:
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def run_coverage_try(templates=None, full=False, parameters=None,
                     push=True, message='{msg}', **kwargs):
    '''Select and push tasks to try based on coverage data for local changes.

    Downloads the chunk mapping for the current base revision, maps each
    outgoing (changed) file to the tests/chunks that cover it, filters the
    task graph down to matching opt tasks, and pushes them to try.

    Returns the result of push_to_try, or 1 when no tests or tasks match.
    '''
    # Avoid a shared mutable default argument: templates is mutated below via
    # setdefault()/update(), which would leak state across calls if the
    # default dict were shared.
    if templates is None:
        templates = {}

    download_coverage_mapping(vcs.base_ref)

    changed_sources = vcs.get_outgoing_files()
    test_files, test_chunks = find_tests(changed_sources)
    if not test_files and not test_chunks:
        print('ERROR Could not find any tests or chunks to run.')
        return 1

    all_tasks = generate_tasks(parameters, full, root=build.topsrcdir)

    tasks_by_chunks = filter_tasks_by_chunks(all_tasks, test_chunks)
    tasks_by_path = filter_tasks_by_paths(all_tasks, test_files)
    # Materialize the filter result: on Python 3 a filter object is always
    # truthy and has no len(), which would break the checks below.
    tasks = list(filter(is_opt_task, set(tasks_by_path + tasks_by_chunks)))

    if not tasks:
        print('ERROR Did not find any matching tasks after filtering.')
        return 1
    test_count_message = ('{test_count} test file{test_plural} that ' +
                          'cover{test_singular} these changes ' +
                          '({task_count} task{task_plural} to be scheduled)').format(
        test_count=len(test_files),
        test_plural='' if len(test_files) == 1 else 's',
        test_singular='s' if len(test_files) == 1 else '',
        task_count=len(tasks),
        task_plural='' if len(tasks) == 1 else 's')
    print('Found ' + test_count_message)

    # Set the test paths to be run by setting MOZHARNESS_TEST_PATHS.
    path_env = {'MOZHARNESS_TEST_PATHS': ':'.join(test_files)}
    templates.setdefault('env', {}).update(path_env)

    # Build commit message.
    msg = 'try coverage - ' + test_count_message
    return push_to_try('coverage', message.format(msg=msg), tasks, templates, push=push,
                       closed_tree=kwargs['closed_tree'])
|
|
@ -6,19 +6,17 @@ from __future__ import absolute_import, print_function, unicode_literals
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import platform
|
import platform
|
||||||
import re
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
from distutils.spawn import find_executable
|
from distutils.spawn import find_executable
|
||||||
|
|
||||||
from mozboot.util import get_state_dir
|
from mozboot.util import get_state_dir
|
||||||
from mozterm import Terminal
|
from mozterm import Terminal
|
||||||
from moztest.resolve import TestResolver, get_suite_definition
|
|
||||||
from six import string_types
|
from six import string_types
|
||||||
|
|
||||||
from .. import preset as pset
|
from .. import preset as pset
|
||||||
from ..cli import BaseTryParser
|
from ..cli import BaseTryParser
|
||||||
from ..tasks import generate_tasks
|
from ..tasks import generate_tasks, filter_tasks_by_paths
|
||||||
from ..push import check_working_directory, push_to_try, vcs
|
from ..push import check_working_directory, push_to_try, vcs
|
||||||
|
|
||||||
terminal = Terminal()
|
terminal = Terminal()
|
||||||
|
@ -176,27 +174,6 @@ def format_header():
|
||||||
return FZF_HEADER.format(shortcuts=', '.join(shortcuts), t=terminal)
|
return FZF_HEADER.format(shortcuts=', '.join(shortcuts), t=terminal)
|
||||||
|
|
||||||
|
|
||||||
def filter_by_paths(tasks, paths):
|
|
||||||
resolver = TestResolver.from_environment(cwd=here)
|
|
||||||
run_suites, run_tests = resolver.resolve_metadata(paths)
|
|
||||||
flavors = set([(t['flavor'], t.get('subsuite')) for t in run_tests])
|
|
||||||
|
|
||||||
task_regexes = set()
|
|
||||||
for flavor, subsuite in flavors:
|
|
||||||
suite = get_suite_definition(flavor, subsuite, strict=True)
|
|
||||||
if 'task_regex' not in suite:
|
|
||||||
print("warning: no tasks could be resolved from flavor '{}'{}".format(
|
|
||||||
flavor, " and subsuite '{}'".format(subsuite) if subsuite else ""))
|
|
||||||
continue
|
|
||||||
|
|
||||||
task_regexes.update(suite['task_regex'])
|
|
||||||
|
|
||||||
def match_task(task):
|
|
||||||
return any(re.search(pattern, task) for pattern in task_regexes)
|
|
||||||
|
|
||||||
return filter(match_task, tasks)
|
|
||||||
|
|
||||||
|
|
||||||
def run_fzf(cmd, tasks):
|
def run_fzf(cmd, tasks):
|
||||||
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
|
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
|
||||||
out = proc.communicate('\n'.join(tasks))[0].splitlines()
|
out = proc.communicate('\n'.join(tasks))[0].splitlines()
|
||||||
|
@ -225,7 +202,7 @@ def run_fuzzy_try(update=False, query=None, templates=None, full=False, paramete
|
||||||
all_tasks = generate_tasks(parameters, full, root=vcs.path)
|
all_tasks = generate_tasks(parameters, full, root=vcs.path)
|
||||||
|
|
||||||
if paths:
|
if paths:
|
||||||
all_tasks = filter_by_paths(all_tasks, paths)
|
all_tasks = filter_tasks_by_paths(all_tasks, paths)
|
||||||
if not all_tasks:
|
if not all_tasks:
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
|
|
@ -13,6 +13,7 @@ import sys
|
||||||
from mozboot.util import get_state_dir
|
from mozboot.util import get_state_dir
|
||||||
from mozbuild.base import MozbuildObject
|
from mozbuild.base import MozbuildObject
|
||||||
from mozpack.files import FileFinder
|
from mozpack.files import FileFinder
|
||||||
|
from moztest.resolve import TestResolver, get_suite_definition
|
||||||
|
|
||||||
import taskgraph
|
import taskgraph
|
||||||
from taskgraph.generator import TaskGraphGenerator
|
from taskgraph.generator import TaskGraphGenerator
|
||||||
|
@ -104,3 +105,24 @@ def generate_tasks(params, full, root):
|
||||||
with open(cache, 'w') as fh:
|
with open(cache, 'w') as fh:
|
||||||
fh.write('\n'.join(labels))
|
fh.write('\n'.join(labels))
|
||||||
return labels
|
return labels
|
||||||
|
|
||||||
|
|
||||||
|
def filter_tasks_by_paths(tasks, paths):
    # Resolve the given paths to test flavors/subsuites via the manifest
    # resolver, then keep only the task labels matching a suite-specific
    # regex for one of those flavors.
    resolver = TestResolver.from_environment(cwd=here)
    run_suites, run_tests = resolver.resolve_metadata(paths)
    flavors = set([(t['flavor'], t.get('subsuite')) for t in run_tests])

    task_regexes = set()
    for flavor, subsuite in flavors:
        suite = get_suite_definition(flavor, subsuite, strict=True)
        if 'task_regex' not in suite:
            print("warning: no tasks could be resolved from flavor '{}'{}".format(
                flavor, " and subsuite '{}'".format(subsuite) if subsuite else ""))
            continue

        task_regexes.update(suite['task_regex'])

    def match_task(task):
        # A task is kept when any resolved suite regex matches its label.
        return any(re.search(pattern, task) for pattern in task_regexes)

    return filter(match_task, tasks)
|
||||||
|
|
|
@ -3,5 +3,5 @@ subsuite=try
|
||||||
skip-if = python == 3
|
skip-if = python == 3
|
||||||
|
|
||||||
[test_again.py]
|
[test_again.py]
|
||||||
[test_fuzzy.py]
|
[test_tasks.py]
|
||||||
[test_templates.py]
|
[test_templates.py]
|
||||||
|
|
|
@ -8,7 +8,7 @@ import mozunit
|
||||||
import pytest
|
import pytest
|
||||||
from moztest.resolve import TestResolver
|
from moztest.resolve import TestResolver
|
||||||
|
|
||||||
from tryselect.selectors import fuzzy
|
from tryselect.tasks import filter_tasks_by_paths
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
|
@ -20,14 +20,14 @@ def patch_resolver(monkeypatch):
|
||||||
return inner
|
return inner
|
||||||
|
|
||||||
|
|
||||||
def test_filter_by_paths(patch_resolver):
|
def test_filter_tasks_by_paths(patch_resolver):
|
||||||
tasks = ['foobar/xpcshell-1', 'foobar/mochitest', 'foobar/xpcshell']
|
tasks = ['foobar/xpcshell-1', 'foobar/mochitest', 'foobar/xpcshell']
|
||||||
|
|
||||||
patch_resolver(['xpcshell'], {})
|
patch_resolver(['xpcshell'], {})
|
||||||
assert fuzzy.filter_by_paths(tasks, 'dummy') == []
|
assert filter_tasks_by_paths(tasks, 'dummy') == []
|
||||||
|
|
||||||
patch_resolver([], [{'flavor': 'xpcshell'}])
|
patch_resolver([], [{'flavor': 'xpcshell'}])
|
||||||
assert fuzzy.filter_by_paths(tasks, 'dummy') == ['foobar/xpcshell-1', 'foobar/xpcshell']
|
assert filter_tasks_by_paths(tasks, 'dummy') == ['foobar/xpcshell-1', 'foobar/xpcshell']
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
Loading…
Link in new issue