Backed out changeset 3779175a4d7f (bug 1541147) for gecko decision bustage. CLOSED TREE

This commit is contained in:
Csoregi Natalia 2019-04-05 15:53:56 +03:00
Parent c8538c029f
Commit 7fc8245165
4 changed files with 1 addition and 317 deletions

View file

@@ -6,7 +6,6 @@ from __future__ import absolute_import, print_function, unicode_literals
import argparse
import hashlib
import io
import itertools
import json
import logging
@@ -14,11 +13,9 @@ import ntpath
import operator
import os
import re
import requests
import shutil
import subprocess
import sys
import tarfile
import tempfile
import xml.etree.ElementTree as ET
import yaml
@@ -53,8 +50,6 @@ from mozbuild.backend import (
from mozversioncontrol import get_repository_object
from taskgraph.util.taskcluster import get_root_url
BUILD_WHAT_HELP = '''
What to build. Can be a top-level make target or a relative directory. If
multiple options are provided, they will be built serially. Takes dependency
@@ -1343,6 +1338,7 @@ class PackageFrontend(MachCommandBase):
        )
        from requests.adapters import HTTPAdapter
        import redo
        import requests
        from taskgraph.util.taskcluster import (
            get_artifact_url,
@@ -1772,264 +1768,6 @@ class StaticAnalysis(MachCommandBase):
        rc = self.check_java(source, jobs, strip, verbose, skip_export=True)
        return rc

    @StaticAnalysisSubCommand('static-analysis', 'check-coverity',
                              'Run the Coverity static-analysis tool on the given files. '
                              'Can only be run by automation! '
                              'Its result is stored as a json file on the artifacts server.')
    @CommandArgument('source', nargs='*', default=['.*'],
                     help='Source files to be analyzed by the Coverity Static Analysis Tool. '
                          'This is run only in automation.')
    @CommandArgument('--output', '-o', default=None,
                     help='Write the Coverity output translated to json to a file')
    @CommandArgument('--coverity_output_path', '-co', default=None,
                     help='Path where to write the Coverity results as cov-results.json. '
                          'If no path is specified, the default path from the Coverity working '
                          'directory, ~/.mozbuild/coverity, is used.')
    @CommandArgument('--outgoing', default=False, action='store_true',
                     help='Run Coverity on outgoing files from the mercurial or git repository')
    def check_coverity(self, source=None, output=None, coverity_output_path=None,
                       outgoing=False, verbose=False):
        self._set_log_level(verbose)
        self.log_manager.enable_all_structured_loggers()

        if 'MOZ_AUTOMATION' not in os.environ:
            self.log(logging.INFO, 'static-analysis', {}, 'Coverity-based static analysis cannot be run outside automation.')
            return

        # Use outgoing files instead of source files
        if outgoing:
            repo = get_repository_object(self.topsrcdir)
            files = repo.get_outgoing_files()
            source = map(os.path.abspath, files)

        rc = self._build_compile_db(verbose=verbose)
        rc = rc or self._build_export(jobs=2, verbose=verbose)
        if rc != 0:
            return rc

        commands_list = self.get_files_with_commands(source)
        if len(commands_list) == 0:
            self.log(logging.INFO, 'static-analysis', {}, 'There are no files that need to be analyzed.')
            return 1

        rc = self.setup_coverity()
        if rc != 0:
            return rc

        # First run `cov-run-desktop --setup` in order to set up the analysis environment
        cmd = [self.cov_run_desktop, '--setup']
        self.log(logging.INFO, 'static-analysis', {}, 'Running {} --setup'.format(self.cov_run_desktop))

        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
        if rc != 0:
            self.log(logging.ERROR, 'static-analysis', {}, 'Running {} --setup failed!'.format(self.cov_run_desktop))
            return rc

        # Run cov-configure for clang
        cmd = [self.cov_configure, '--clang']
        self.log(logging.INFO, 'static-analysis', {}, 'Running {} --clang'.format(self.cov_configure))

        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
        if rc != 0:
            self.log(logging.ERROR, 'static-analysis', {}, 'Running {} --clang failed!'.format(self.cov_configure))
            return rc

        # For each element in commands_list run `cov-translate`
        for element in commands_list:
            cmd = [self.cov_translate, '--dir', self.cov_idir_path] + element['command'].split(' ')
            self.log(logging.INFO, 'static-analysis', {}, 'Running Coverity Translate for {}'.format(cmd))
            rc = self.run_process(args=cmd, cwd=element['directory'], pass_thru=True)
            if rc != 0:
                self.log(logging.ERROR, 'static-analysis', {}, 'Running Coverity Translate failed for {}'.format(cmd))
                return rc

        if coverity_output_path is None:
            cov_result = mozpath.join(self.cov_state_path, 'cov-results.json')
        else:
            cov_result = mozpath.join(coverity_output_path, 'cov-results.json')

        # Once the capture is performed, run the actual Coverity Desktop analysis
        cmd = [self.cov_run_desktop, '--json-output-v6', cov_result, '--strip-path', self.topsrcdir]
        cmd += [element['file'] for element in commands_list]

        self.log(logging.INFO, 'static-analysis', {}, 'Running Coverity Analysis for {}'.format(cmd))
        rc = self.run_process(cmd, cwd=self.cov_state_path, pass_thru=True)
        if rc != 0:
            self.log(logging.ERROR, 'static-analysis', {}, 'Coverity Analysis failed!')

        if output is not None:
            self.dump_cov_artifact(cov_result, output)

    def dump_cov_artifact(self, cov_results, output):
        # Parse the Coverity json into structured issues
        with open(cov_results) as f:
            result = json.load(f)

        # Convert the issues to a standard json format
        issues_dict = {'files': {}}
        files_list = issues_dict['files']

        def build_element(issue):
            # We only look for the main event
            event_path = next((event for event in issue['events'] if event['main'] is True), None)

            dict_issue = {
                'line': issue['mainEventLineNumber'],
                'flag': issue['checkerName'],
                'message': event_path['eventDescription'],
                'extra': []
            }

            # Embed all events into the extra message
            for event in issue['events']:
                dict_issue['extra'].append({'file_path': event['strippedFilePathname'],
                                            'line_number': event['lineNumber'],
                                            'path_type': event['eventTag'],
                                            'description': event['eventDescription']})

            return dict_issue

        for issue in result['issues']:
            path = issue['strippedMainEventFilePathname'].strip('/')
            if path in files_list:
                files_list[path]['warnings'].append(build_element(issue))
            else:
                files_list[path] = {'warnings': [build_element(issue)]}

        with open(output, 'w') as f:
            json.dump(issues_dict, f)
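
    # For reference, the json written by dump_cov_artifact above has roughly the
    # following shape (key names come from build_element; the concrete path and
    # values below are only illustrative):
    #
    #   {
    #       "files": {
    #           "dom/base/nsDocument.cpp": {
    #               "warnings": [
    #                   {
    #                       "line": 123,
    #                       "flag": "NULL_RETURNS",
    #                       "message": "Dereferencing a pointer that might be null",
    #                       "extra": [
    #                           {
    #                               "file_path": "dom/base/nsDocument.cpp",
    #                               "line_number": 123,
    #                               "path_type": "deref",
    #                               "description": "Dereferencing a pointer that might be null"
    #                           }
    #                       ]
    #                   }
    #               ]
    #           }
    #       }
    #   }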

    def get_coverity_secrets(self):
        secret_name = 'project/relman/coverity'
        secrets_url = '{}/secrets/v1/secret/{}'.format(get_root_url(True), secret_name)

        self.log(logging.INFO, 'static-analysis', {}, 'Fetching the Coverity secret from the secrets service: "{}"'.format(secrets_url))
        res = requests.get(secrets_url)
        res.raise_for_status()
        secret = res.json()
        cov_config = secret['secret'] if 'secret' in secret else None

        if cov_config is None:
            self.log(logging.ERROR, 'static-analysis', {}, 'Ill-formatted secret for Coverity. Aborting analysis.')
            return 1

        self.cov_analysis_url = cov_config.get('package_url')
        self.cov_package_name = cov_config.get('package_name')
        self.cov_url = cov_config.get('server_url')
        self.cov_auth = cov_config.get('auth_key')
        self.cov_package_ver = cov_config.get('package_ver')
        self.cov_full_stack = cov_config.get('full_stack', False)

        return 0
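
    # For context, the secret fetched above is expected to be a json document of
    # roughly this form (key names mirror the cov_config.get() calls; the values
    # shown are placeholders, not real endpoints or credentials):
    #
    #   {
    #       "secret": {
    #           "package_url": "https://<private-instance>/<package>.tar.gz",
    #           "package_name": "cov-analysis-linux64-<version>",
    #           "server_url": "<coverity-server-host>",
    #           "auth_key": "<authentication-key>",
    #           "package_ver": "<version>",
    #           "full_stack": false
    #       }
    #   }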

    def download_coverity(self):
        if self.cov_url is None or self.cov_analysis_url is None or self.cov_auth is None:
            self.log(logging.ERROR, 'static-analysis', {}, 'Missing Coverity secret on try job!')
            return 1

        COVERITY_CONFIG = '''
        {
            "type": "Coverity configuration",
            "format_version": 1,
            "settings": {
                "server": {
                    "host": "%s",
                    "ssl" : true,
                    "on_new_cert" : "trust",
                    "auth_key_file": "%s"
                },
                "stream": "Firefox",
                "cov_run_desktop": {
                    "build_cmd": [],
                    "clean_cmd": []
                }
            }
        }
        '''
        # Generate the coverity.conf and auth files
        cov_auth_path = mozpath.join(self.cov_state_path, 'auth')
        cov_setup_path = mozpath.join(self.cov_state_path, 'coverity.conf')
        cov_conf = COVERITY_CONFIG % (self.cov_url, cov_auth_path)

        def download(artifact_url, target):
            resp = requests.get(artifact_url, verify=False, stream=True)
            resp.raise_for_status()

            # Extract the archive into the destination
            with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
                tar.extractall(target)

        download(self.cov_analysis_url, self.cov_state_path)

        with open(cov_auth_path, 'w') as f:
            f.write(self.cov_auth)

        # Restrict its permissions to 600
        os.chmod(cov_auth_path, 0o600)

        with open(cov_setup_path, 'a') as f:
            f.write(cov_conf)

    def setup_coverity(self, force_download=True):
        rc, config, _ = self._get_config_environment()
        rc = rc or self.get_coverity_secrets()
        if rc != 0:
            return rc

        # Create a directory in ~/.mozbuild where we set up Coverity
        self.cov_state_path = mozpath.join(self._mach_context.state_dir, "coverity")

        if force_download is True and os.path.exists(self.cov_state_path):
            shutil.rmtree(self.cov_state_path)
        os.mkdir(self.cov_state_path)

        # Download everything that we need for Coverity from our private instance
        self.download_coverity()

        self.cov_path = mozpath.join(self.cov_state_path, self.cov_package_name)
        self.cov_run_desktop = mozpath.join(self.cov_path, 'bin', 'cov-run-desktop')
        self.cov_translate = mozpath.join(self.cov_path, 'bin', 'cov-translate')
        self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
        self.cov_work_path = mozpath.join(self.cov_state_path, 'data-coverity')
        self.cov_idir_path = mozpath.join(self.cov_work_path, self.cov_package_ver, 'idir')

        if not os.path.exists(self.cov_path):
            self.log(logging.ERROR, 'static-analysis', {}, 'Missing Coverity in {}'.format(self.cov_path))
            return 1

        return 0

    def get_files_with_commands(self, source):
        '''
        Returns a list of dictionaries pairing each file from `source` with its
        build command from the compilation database.
        '''
        with open(self._compile_db, 'r') as db:
            compile_db = json.load(db)

        commands_list = []

        for f in source:
            # It must be a C/C++ file
            _, ext = os.path.splitext(f)

            if ext.lower() not in self._format_include_extensions:
                self.log(logging.INFO, 'static-analysis', {}, 'Skipping {}'.format(f))
                continue
            file_with_abspath = os.path.join(self.topsrcdir, f)
            for entry in compile_db:
                # Found the file that we are looking for
                if file_with_abspath == entry['file']:
                    commands_list.append(entry)

        return commands_list
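
    # Each entry matched above comes straight from the compilation database
    # (compile_commands.json), i.e. a dictionary of roughly this form; the paths
    # and flags shown here are illustrative:
    #
    #   {
    #       "directory": "/path/to/objdir/dom/base",
    #       "command": "clang++ -std=gnu++14 ... -c /path/to/srcdir/dom/base/nsDocument.cpp",
    #       "file": "/path/to/srcdir/dom/base/nsDocument.cpp"
    #   }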

    @StaticAnalysisSubCommand('static-analysis', 'check-java',
                              'Run infer on the java codebase.')
    @CommandArgument('source', nargs='*', default=['mobile'],

View file

@@ -96,7 +96,6 @@ treeherder:
        'WR': 'WebRender standalone'
        'Gd': 'Geckodriver'
        'clang': 'Clang Tidy & Format'
        'coverity': 'Coverity Static Analysis'
index:
    products:

View file

@@ -1,52 +0,0 @@
job-defaults:
    platform: linux64/opt
    attributes:
        code-review: true
    worker-type:
        by-platform:
            linux64.*: aws-provisioner-v1/gecko-t-linux-xlarge
    worker:
        docker-image: {in-tree: debian7-amd64-build}
        max-run-time: 5400
    treeherder:
        kind: other
        tier: 2
    run:
        using: run-task
        tooltool-downloads: public
    toolchains:
        - linux64-clang
        - linux64-rust
        - linux64-cbindgen
        - linux64-nasm
        - linux64-node
    when:
        # Extension list from https://hg.mozilla.org/mozilla-central/file/tip/python/mozbuild/mozbuild/mach_commands.py#l1664
        files-changed:
            - '**/*.c'
            - '**/*.cpp'
            - '**/*.cc'
            - '**/*.cxx'
            - '**/*.m'
            - '**/*.mm'
            - '**/*.h'
            - '**/*.hh'
            - '**/*.hpp'
            - '**/*.hxx'

coverity:
    description: Run static-analysis (Coverity) on C/C++ patches
    treeherder:
        symbol: coverity(cvsa)
    run:
        command: >-
            source $HOME/checkouts/gecko/taskcluster/scripts/misc/source-test-clang-setup.sh &&
            cd $HOME/checkouts/gecko &&
            ./mach --log-no-times static-analysis check-coverity --outgoing --output $HOME/coverity.json
    scopes:
        - secrets:get:project/relman/coverity
    worker:
        artifacts:
            - type: file
              name: public/code-review/coverity.json
              path: /builds/worker/coverity.json

View file

@@ -17,7 +17,6 @@ kind-dependencies:
jobs-from:
    - clang.yml
    - coverity.yml
    - cram.yml
    - doc.yml
    - file-metadata.yml