Bug 1650834 - Make tools/lint code black compatible r=ahal

Differential Revision: https://phabricator.services.mozilla.com/D82401
Sylvestre Ledru 2020-07-07 08:09:31 +00:00
Parent 4b8b29c396
Commit 9c734f1175
34 changed files with 985 additions and 805 deletions

View file

@@ -14,9 +14,7 @@ import sys
import xml.etree.ElementTree as ET
from mozpack.files import (
FileFinder,
)
from mozpack.files import FileFinder
import mozpack.path as mozpath
from mozlint import result
@@ -28,30 +26,42 @@ GRADLE_LOCK_MAX_WAIT_SECONDS = 20 * 60
def setup(root, **setupargs):
if setupargs.get('substs', {}).get('MOZ_BUILD_APP') != 'mobile/android':
if setupargs.get("substs", {}).get("MOZ_BUILD_APP") != "mobile/android":
return 1
if 'topobjdir' not in setupargs:
print('Skipping {}: a configured Android build is required!'.format(setupargs['name']))
if "topobjdir" not in setupargs:
print(
"Skipping {}: a configured Android build is required!".format(
setupargs["name"]
)
)
return 1
return 0
def gradle(log, topsrcdir=None, topobjdir=None, tasks=[], extra_args=[], verbose=True):
sys.path.insert(0, os.path.join(topsrcdir, 'mobile', 'android'))
sys.path.insert(0, os.path.join(topsrcdir, "mobile", "android"))
from gradle import gradle_lock
with gradle_lock(topobjdir, max_wait_seconds=GRADLE_LOCK_MAX_WAIT_SECONDS):
# The android-lint parameter can be used by gradle tasks to run special
# logic when they are run for a lint using
# project.hasProperty('android-lint')
cmd_args = [sys.executable, os.path.join(topsrcdir, 'mach'),
'gradle', '--verbose', '-Pandroid-lint', '--'] + \
tasks + \
extra_args
cmd_args = (
[
sys.executable,
os.path.join(topsrcdir, "mach"),
"gradle",
"--verbose",
"-Pandroid-lint",
"--",
]
+ tasks
+ extra_args
)
cmd = ' '.join(six.moves.shlex_quote(arg) for arg in cmd_args)
cmd = " ".join(six.moves.shlex_quote(arg) for arg in cmd_args)
log.debug(cmd)
# Gradle and mozprocess do not get along well, so we use subprocess
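A minimal sketch of the quoting pattern above (the task name is invented): the joined string exists so the debug log is copy-pasteable into a shell, while the list form is what a subprocess call would receive.

from six.moves import shlex_quote

cmd_args = ["./mach", "gradle", "--verbose", "-Pandroid-lint", "--", "lintDebug"]
# Quote each argument individually so the logged command is shell-safe.
cmd = " ".join(shlex_quote(arg) for arg in cmd_args)
print(cmd)  # ./mach gradle --verbose -Pandroid-lint -- lintDebug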
@@ -75,40 +85,45 @@ def gradle(log, topsrcdir=None, topobjdir=None, tasks=[], extra_args=[], verbose
def api_lint(config, **lintargs):
topsrcdir = lintargs['root']
topobjdir = lintargs['topobjdir']
topsrcdir = lintargs["root"]
topobjdir = lintargs["topobjdir"]
gradle(lintargs['log'], topsrcdir=topsrcdir, topobjdir=topobjdir,
tasks=lintargs['substs']['GRADLE_ANDROID_API_LINT_TASKS'],
extra_args=lintargs.get('extra_args') or [])
gradle(
lintargs["log"],
topsrcdir=topsrcdir,
topobjdir=topobjdir,
tasks=lintargs["substs"]["GRADLE_ANDROID_API_LINT_TASKS"],
extra_args=lintargs.get("extra_args") or [],
)
folder = lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_APILINT_FOLDER']
folder = lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_APILINT_FOLDER"]
results = []
with open(os.path.join(topobjdir, folder, 'apilint-result.json')) as f:
with open(os.path.join(topobjdir, folder, "apilint-result.json")) as f:
issues = json.load(f)
for rule in ('compat_failures', 'failures'):
for rule in ("compat_failures", "failures"):
for r in issues[rule]:
err = {
'rule': r['rule'] if rule == 'failures' else 'compat_failures',
'path': mozpath.relpath(r['file'], topsrcdir),
'lineno': int(r['line']),
'column': int(r.get('column') or 0),
'message': r['msg'],
"rule": r["rule"] if rule == "failures" else "compat_failures",
"path": mozpath.relpath(r["file"], topsrcdir),
"lineno": int(r["line"]),
"column": int(r.get("column") or 0),
"message": r["msg"],
}
results.append(result.from_config(config, **err))
for r in issues['api_changes']:
for r in issues["api_changes"]:
err = {
'rule': 'api_changes',
'path': mozpath.relpath(r['file'], topsrcdir),
'lineno': int(r['line']),
'column': int(r.get('column') or 0),
'message': 'Unexpected api change. Please run ./gradlew {} for more '
'information'.format(
' '.join(lintargs['substs']['GRADLE_ANDROID_API_LINT_TASKS'])),
"rule": "api_changes",
"path": mozpath.relpath(r["file"], topsrcdir),
"lineno": int(r["line"]),
"column": int(r.get("column") or 0),
"message": "Unexpected api change. Please run ./gradlew {} for more "
"information".format(
" ".join(lintargs["substs"]["GRADLE_ANDROID_API_LINT_TASKS"])
),
}
results.append(result.from_config(config, **err))
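For reference, a hedged sketch of the apilint-result.json shape this loop consumes (key names are taken from the parsing code above; all values are invented):

issues = {
    "failures": [
        {"rule": "GV3", "file": "/checkout/Foo.java", "line": 12, "column": 5, "msg": "..."},
    ],
    "compat_failures": [],
    "api_changes": [
        {"file": "/checkout/api.txt", "line": 1, "column": 0, "msg": "..."},
    ],
}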
@@ -116,14 +131,18 @@ def api_lint(config, **lintargs):
def javadoc(config, **lintargs):
topsrcdir = lintargs['root']
topobjdir = lintargs['topobjdir']
topsrcdir = lintargs["root"]
topobjdir = lintargs["topobjdir"]
gradle(lintargs['log'], topsrcdir=topsrcdir, topobjdir=topobjdir,
tasks=lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS'],
extra_args=lintargs.get('extra_args') or [])
gradle(
lintargs["log"],
topsrcdir=topsrcdir,
topobjdir=topobjdir,
tasks=lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS"],
extra_args=lintargs.get("extra_args") or [],
)
output_files = lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_DOCS_OUTPUT_FILES']
output_files = lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_DOCS_OUTPUT_FILES"]
results = []
@@ -133,42 +152,49 @@ def javadoc(config, **lintargs):
issues = json.load(f)
for issue in issues:
issue['path'] = issue['path'].replace(lintargs['root'], '')
issue["path"] = issue["path"].replace(lintargs["root"], "")
# We want warnings to be errors for linting purposes.
issue['level'] = 'error'
issue["level"] = "error"
results.append(result.from_config(config, **issue))
return results
def lint(config, **lintargs):
topsrcdir = lintargs['root']
topobjdir = lintargs['topobjdir']
topsrcdir = lintargs["root"]
topobjdir = lintargs["topobjdir"]
gradle(lintargs['log'], topsrcdir=topsrcdir, topobjdir=topobjdir,
tasks=lintargs['substs']['GRADLE_ANDROID_LINT_TASKS'],
extra_args=lintargs.get('extra_args') or [])
gradle(
lintargs["log"],
topsrcdir=topsrcdir,
topobjdir=topobjdir,
tasks=lintargs["substs"]["GRADLE_ANDROID_LINT_TASKS"],
extra_args=lintargs.get("extra_args") or [],
)
# It's surprising that this is the App variant name, but this is "withoutGeckoBinariesDebug"
# right now and the GeckoView variant name is "withGeckoBinariesDebug". This will be addressed
# as we unify variants.
path = os.path.join(
lintargs['topobjdir'],
'gradle/build/mobile/android/geckoview/reports',
'lint-results-{}.xml'.format(lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_VARIANT_NAME']))
tree = ET.parse(open(path, 'rt'))
lintargs["topobjdir"],
"gradle/build/mobile/android/geckoview/reports",
"lint-results-{}.xml".format(
lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_VARIANT_NAME"]
),
)
tree = ET.parse(open(path, "rt"))
root = tree.getroot()
results = []
for issue in root.findall('issue'):
for issue in root.findall("issue"):
location = issue[0]
err = {
'level': issue.get('severity').lower(),
'rule': issue.get('id'),
'message': issue.get('message'),
'path': location.get('file').replace(lintargs['root'], ''),
'lineno': int(location.get('line') or 0),
"level": issue.get("severity").lower(),
"rule": issue.get("id"),
"message": issue.get("message"),
"path": location.get("file").replace(lintargs["root"], ""),
"lineno": int(location.get("line") or 0),
}
results.append(result.from_config(config, **err))
@@ -176,40 +202,46 @@ def lint(config, **lintargs):
def _parse_checkstyle_output(config, topsrcdir=None, report_path=None):
tree = ET.parse(open(report_path, 'rt'))
tree = ET.parse(open(report_path, "rt"))
root = tree.getroot()
for file in root.findall('file'):
sourcepath = file.get('name').replace(topsrcdir + '/', '')
for file in root.findall("file"):
sourcepath = file.get("name").replace(topsrcdir + "/", "")
for error in file.findall('error'):
for error in file.findall("error"):
# Like <error column="42" line="22" message="Name 'mPorts' must match pattern 'xm[A-Z][A-Za-z]*$'." severity="error" source="com.puppycrawl.tools.checkstyle.checks.naming.MemberNameCheck" />. # NOQA: E501
err = {
'level': 'error',
'rule': error.get('source'),
'message': error.get('message'),
'path': sourcepath,
'lineno': int(error.get('line') or 0),
'column': int(error.get('column') or 0),
"level": "error",
"rule": error.get("source"),
"message": error.get("message"),
"path": sourcepath,
"lineno": int(error.get("line") or 0),
"column": int(error.get("column") or 0),
}
yield result.from_config(config, **err)
def checkstyle(config, **lintargs):
topsrcdir = lintargs['root']
topobjdir = lintargs['topobjdir']
topsrcdir = lintargs["root"]
topobjdir = lintargs["topobjdir"]
gradle(lintargs['log'], topsrcdir=topsrcdir, topobjdir=topobjdir,
tasks=lintargs['substs']['GRADLE_ANDROID_CHECKSTYLE_TASKS'],
extra_args=lintargs.get('extra_args') or [])
gradle(
lintargs["log"],
topsrcdir=topsrcdir,
topobjdir=topobjdir,
tasks=lintargs["substs"]["GRADLE_ANDROID_CHECKSTYLE_TASKS"],
extra_args=lintargs.get("extra_args") or [],
)
results = []
for relative_path in lintargs['substs']['GRADLE_ANDROID_CHECKSTYLE_OUTPUT_FILES']:
report_path = os.path.join(lintargs['topobjdir'], relative_path)
results.extend(_parse_checkstyle_output(config,
topsrcdir=lintargs['root'],
report_path=report_path))
for relative_path in lintargs["substs"]["GRADLE_ANDROID_CHECKSTYLE_OUTPUT_FILES"]:
report_path = os.path.join(lintargs["topobjdir"], relative_path)
results.extend(
_parse_checkstyle_output(
config, topsrcdir=lintargs["root"], report_path=report_path
)
)
return results
@@ -217,77 +249,96 @@ def checkstyle(config, **lintargs):
def _parse_android_test_results(config, topsrcdir=None, report_dir=None):
# A brute force way to turn a Java FQN into a path on disk. Assumes Java
# and Kotlin sources are in mobile/android for performance and simplicity.
sourcepath_finder = FileFinder(os.path.join(topsrcdir, 'mobile', 'android'))
sourcepath_finder = FileFinder(os.path.join(topsrcdir, "mobile", "android"))
finder = FileFinder(report_dir)
reports = list(finder.find('TEST-*.xml'))
reports = list(finder.find("TEST-*.xml"))
if not reports:
raise RuntimeError('No reports found under {}'.format(report_dir))
raise RuntimeError("No reports found under {}".format(report_dir))
for report, _ in reports:
tree = ET.parse(open(os.path.join(finder.base, report), 'rt'))
tree = ET.parse(open(os.path.join(finder.base, report), "rt"))
root = tree.getroot()
class_name = root.get('name') # Like 'org.mozilla.gecko.permissions.TestPermissions'.
path = '**/' + class_name.replace('.', '/') + '.*' # Like '**/org/mozilla/gecko/permissions/TestPermissions.*'. # NOQA: E501
class_name = root.get(
"name"
) # Like 'org.mozilla.gecko.permissions.TestPermissions'.
path = (
"**/" + class_name.replace(".", "/") + ".*"
) # Like '**/org/mozilla/gecko/permissions/TestPermissions.*'. # NOQA: E501
for testcase in root.findall('testcase'):
function_name = testcase.get('name')
for testcase in root.findall("testcase"):
function_name = testcase.get("name")
# Schema cribbed from http://llg.cubic.org/docs/junit/.
for unexpected in itertools.chain(testcase.findall('error'),
testcase.findall('failure')):
for unexpected in itertools.chain(
testcase.findall("error"), testcase.findall("failure")
):
sourcepaths = list(sourcepath_finder.find(path))
if not sourcepaths:
raise RuntimeError('No sourcepath found for class {class_name}'
.format(class_name=class_name))
raise RuntimeError(
"No sourcepath found for class {class_name}".format(
class_name=class_name
)
)
for sourcepath, _ in sourcepaths:
lineno = 0
message = unexpected.get('message')
message = unexpected.get("message")
# Turn '... at org.mozilla.gecko.permissions.TestPermissions.testMultipleRequestsAreQueuedAndDispatchedSequentially(TestPermissions.java:118)' into 118. # NOQA: E501
pattern = r'at {class_name}\.{function_name}\(.*:(\d+)\)'
pattern = pattern.format(class_name=class_name, function_name=function_name)
pattern = r"at {class_name}\.{function_name}\(.*:(\d+)\)"
pattern = pattern.format(
class_name=class_name, function_name=function_name
)
match = re.search(pattern, message)
if match:
lineno = int(match.group(1))
else:
msg = 'No source line found for {class_name}.{function_name}'.format(
class_name=class_name, function_name=function_name)
msg = "No source line found for {class_name}.{function_name}".format(
class_name=class_name, function_name=function_name
)
raise RuntimeError(msg)
err = {
'level': 'error',
'rule': unexpected.get('type'),
'message': message,
'path': os.path.join('mobile', 'android', sourcepath),
'lineno': lineno,
"level": "error",
"rule": unexpected.get("type"),
"message": message,
"path": os.path.join("mobile", "android", sourcepath),
"lineno": lineno,
}
yield result.from_config(config, **err)
def test(config, **lintargs):
topsrcdir = lintargs['root']
topobjdir = lintargs['topobjdir']
topsrcdir = lintargs["root"]
topobjdir = lintargs["topobjdir"]
gradle(lintargs['log'], topsrcdir=topsrcdir, topobjdir=topobjdir,
tasks=lintargs['substs']['GRADLE_ANDROID_TEST_TASKS'],
extra_args=lintargs.get('extra_args') or [])
gradle(
lintargs["log"],
topsrcdir=topsrcdir,
topobjdir=topobjdir,
tasks=lintargs["substs"]["GRADLE_ANDROID_TEST_TASKS"],
extra_args=lintargs.get("extra_args") or [],
)
results = []
def capitalize(s):
# Can't use str.capitalize because it lower cases trailing letters.
return (s[0].upper() + s[1:]) if s else ''
return (s[0].upper() + s[1:]) if s else ""
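# Illustrative: "withGeckoBinariesDebug".capitalize() yields
# "Withgeckobinariesdebug", whereas the helper above preserves camel case
# and yields "WithGeckoBinariesDebug".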
pairs = [('geckoview', lintargs['substs']['GRADLE_ANDROID_GECKOVIEW_VARIANT_NAME'])]
pairs = [("geckoview", lintargs["substs"]["GRADLE_ANDROID_GECKOVIEW_VARIANT_NAME"])]
for project, variant in pairs:
report_dir = os.path.join(
lintargs['topobjdir'],
'gradle/build/mobile/android/{}/test-results/test{}UnitTest'
.format(project, capitalize(variant)))
results.extend(_parse_android_test_results(config,
topsrcdir=lintargs['root'],
report_dir=report_dir))
lintargs["topobjdir"],
"gradle/build/mobile/android/{}/test-results/test{}UnitTest".format(
project, capitalize(variant)
),
)
results.extend(
_parse_android_test_results(
config, topsrcdir=lintargs["root"], report_dir=report_dir
)
)
return results

View file

@@ -28,8 +28,11 @@ def parse_issues(config, output, paths, log):
for line in output:
match = diff_line.match(line)
file, line_no, col, diff, diff2 = match.groups()
log.debug("file={} line={} col={} diff={} diff2={}".format(
file, line_no, col, diff, diff2))
log.debug(
"file={} line={} col={} diff={} diff2={}".format(
file, line_no, col, diff, diff2
)
)
d = diff + "\n" + diff2
res = {
"path": file,
@@ -78,34 +81,34 @@ def get_clang_format_binary():
clang_tools_path = os.path.join(get_state_dir(), "clang-tools")
bin_path = os.path.join(clang_tools_path, "clang-tidy", "bin")
return os.path.join(bin_path, "clang-format" + substs.get('BIN_SUFFIX', ''))
return os.path.join(bin_path, "clang-format" + substs.get("BIN_SUFFIX", ""))
def is_ignored_path(ignored_dir_re, topsrcdir, f):
# Remove up to topsrcdir in pathname and match
if f.startswith(topsrcdir + '/'):
match_f = f[len(topsrcdir + '/'):]
if f.startswith(topsrcdir + "/"):
match_f = f[len(topsrcdir + "/") :]
else:
match_f = f
return re.match(ignored_dir_re, match_f)
def remove_ignored_path(paths, topsrcdir, log):
path_to_third_party = os.path.join(topsrcdir, '.clang-format-ignore')
path_to_third_party = os.path.join(topsrcdir, ".clang-format-ignore")
ignored_dir = []
with open(path_to_third_party, 'r') as fh:
with open(path_to_third_party, "r") as fh:
for line in fh:
# In case it starts with a space
line = line.strip()
# Remove comments and empty lines
if line.startswith('#') or len(line) == 0:
if line.startswith("#") or len(line) == 0:
continue
# The regexp is to make sure we are managing relative paths
ignored_dir.append(r"^[\./]*" + line.rstrip())
# Generates the list of regexp
ignored_dir_re = '(%s)' % '|'.join(ignored_dir)
ignored_dir_re = "(%s)" % "|".join(ignored_dir)
path_list = []
for f in paths:
@@ -119,13 +122,13 @@ def remove_ignored_path(paths, topsrcdir, log):
def lint(paths, config, fix=None, **lintargs):
log = lintargs['log']
paths = list(expand_exclusions(paths, config, lintargs['root']))
log = lintargs["log"]
paths = list(expand_exclusions(paths, config, lintargs["root"]))
# We ignored some specific files for a bunch of reasons.
# Not using excluding to avoid duplication
if lintargs.get('use_filters', True):
paths = remove_ignored_path(paths, lintargs['root'], log)
if lintargs.get("use_filters", True):
paths = remove_ignored_path(paths, lintargs["root"], log)
# An empty path array can occur when the user passes in `-n`. If we don't
# return early in this case, rustfmt will attempt to read stdin and hang.
@@ -146,18 +149,20 @@ def lint(paths, config, fix=None, **lintargs):
else:
cmd_args.append("--dry-run")
base_command = cmd_args + paths
log.debug("Command: {}".format(' '.join(cmd_args)))
log.debug("Command: {}".format(" ".join(cmd_args)))
output = run_process(config, base_command)
output_list = []
if len(output) % 3 != 0:
raise Exception("clang-format output should be a multiple of 3. Output: %s" % output)
raise Exception(
"clang-format output should be a multiple of 3. Output: %s" % output
)
for i in range(0, len(output), 3):
# Merge the element 3 by 3 (clang-format output)
line = output[i]
line += ";" + output[i+1]
line += ";" + output[i+2]
line += ";" + output[i + 1]
line += ";" + output[i + 2]
output_list.append(line)
if fix:

View file

@@ -44,13 +44,16 @@ def parse_issues(log, config, issues, path, onlyIn):
try:
detail = json.loads(six.ensure_text(issue))
if "message" in detail:
p = detail['target']['src_path']
p = detail["target"]["src_path"]
detail = detail["message"]
if "level" in detail:
if ((detail["level"] == "error" or detail["level"] == "failure-note")
and not detail["code"]):
log.debug("Error outside of clippy."
"This means that the build failed. Therefore, skipping this")
if (
detail["level"] == "error" or detail["level"] == "failure-note"
) and not detail["code"]:
log.debug(
"Error outside of clippy."
"This means that the build failed. Therefore, skipping this"
)
log.debug("File = {} / Detail = {}".format(p, detail))
continue
# We are in a clippy warning
@@ -61,13 +64,17 @@ def parse_issues(log, config, issues, path, onlyIn):
# [], 'code': None, 'level': 'warning', 'message':
# '5 warnings emitted', 'spans': []}
# if this is the case, skip it
log.debug("Skipping the summary line {} for file {}".format(detail, p))
log.debug(
"Skipping the summary line {} for file {}".format(detail, p)
)
continue
l = detail["spans"][0]
if onlyIn and onlyIn not in p:
# Case when we have a .rs in the include list in the yaml file
log.debug("{} is not part of the list of files '{}'".format(p, onlyIn))
log.debug(
"{} is not part of the list of files '{}'".format(p, onlyIn)
)
continue
res = {
"path": p,
@@ -119,13 +126,9 @@ def get_clippy_version(log, binary):
# --version failed, clippy isn't installed.
return False
log.debug(
"Found version: {}".format(
output
)
)
log.debug("Found version: {}".format(output))
version = re.findall(r'(\d+-\d+-\d+)', output)[0].replace("-", ".")
version = re.findall(r"(\d+-\d+-\d+)", output)[0].replace("-", ".")
version = StrictVersion(version)
return version
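# Illustrative: given output such as "clippy 0.0.212 (69f99e7 2019-12-14)",
# the regex above extracts "2019-12-14", which becomes
# StrictVersion("2019.12.14").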
@@ -160,11 +163,11 @@ def lint(paths, config, fix=None, **lintargs):
if not cargo:
print(CARGO_NOT_FOUND)
if 'MOZ_AUTOMATION' in os.environ:
if "MOZ_AUTOMATION" in os.environ:
return 1
return []
min_version_str = config.get('min_clippy_version')
min_version_str = config.get("min_clippy_version")
min_version = StrictVersion(min_version_str)
actual_version = get_clippy_version(log, cargo)
log.debug(
@@ -183,13 +186,13 @@ def lint(paths, config, fix=None, **lintargs):
cmd_args_common = ["--manifest-path"]
cmd_args_clippy = [
cargo,
'clippy',
'--message-format=json',
"clippy",
"--message-format=json",
]
lock_files_to_delete = []
for p in paths:
lock_file = os.path.join(p, 'Cargo.lock')
lock_file = os.path.join(p, "Cargo.lock")
if not os.path.exists(lock_file):
lock_files_to_delete.append(lock_file)
@@ -218,7 +221,7 @@ def lint(paths, config, fix=None, **lintargs):
# Make sure that we don't display that either
onlyIn = p
cargo_files = get_ancestors_by_name('Cargo.toml', p, lintargs['root'])
cargo_files = get_ancestors_by_name("Cargo.toml", p, lintargs["root"])
p = cargo_files[0]
log.debug("Path translated to = {}".format(p))

View file

@@ -8,7 +8,7 @@ import re
from mozlint.types import LineType
here = os.path.abspath(os.path.dirname(__file__))
HEADERS_FILE = os.path.join(here, 'mingw-headers.txt')
HEADERS_FILE = os.path.join(here, "mingw-headers.txt")
# generated by cd mingw-w64/mingw-w64-headers &&
# find . -name "*.h" | xargs -I bob -- basename bob | sort | uniq)
@@ -16,7 +16,7 @@ HEADERS_FILE = os.path.join(here, 'mingw-headers.txt')
class MinGWCapitalization(LineType):
def __init__(self, *args, **kwargs):
super(MinGWCapitalization, self).__init__(*args, **kwargs)
with open(HEADERS_FILE, 'r') as fh:
with open(HEADERS_FILE, "r") as fh:
self.headers = fh.read().strip().splitlines()
self.regex = re.compile("^#include\s*<(" + "|".join(self.headers) + ")>")
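# Illustrative: if mingw-headers.txt lists windows.h and winternl.h, the
# compiled pattern is ^#include\s*<(windows.h|winternl.h)>, matching only
# includes that use the exact lowercase spellings recorded in that file.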

View file

@@ -47,8 +47,8 @@ def setup(root, **lintargs):
def lint(paths, config, binary=None, fix=None, setup=None, **lintargs):
"""Run eslint."""
log = lintargs['log']
setup_helper.set_project_root(lintargs['root'])
log = lintargs["log"]
setup_helper.set_project_root(lintargs["root"])
module_path = setup_helper.get_project_root()
# Valid binaries are:
@@ -63,35 +63,44 @@ def lint(paths, config, binary=None, fix=None, setup=None, **lintargs):
print(ESLINT_NOT_FOUND_MESSAGE)
return 1
extra_args = lintargs.get('extra_args') or []
extra_args = lintargs.get("extra_args") or []
exclude_args = []
for path in config.get('exclude', []):
exclude_args.extend(['--ignore-pattern', os.path.relpath(path, lintargs['root'])])
for path in config.get("exclude", []):
exclude_args.extend(
["--ignore-pattern", os.path.relpath(path, lintargs["root"])]
)
cmd_args = [binary,
os.path.join(module_path, "node_modules", "eslint", "bin", "eslint.js"),
# This keeps ext as a single argument.
'--ext', '[{}]'.format(','.join(config['extensions'])),
'--format', 'json',
'--no-error-on-unmatched-pattern',
] + extra_args + exclude_args + paths
log.debug("Command: {}".format(' '.join(cmd_args)))
cmd_args = (
[
binary,
os.path.join(module_path, "node_modules", "eslint", "bin", "eslint.js"),
# This keeps ext as a single argument.
"--ext",
"[{}]".format(",".join(config["extensions"])),
"--format",
"json",
"--no-error-on-unmatched-pattern",
]
+ extra_args
+ exclude_args
+ paths
)
log.debug("Command: {}".format(" ".join(cmd_args)))
# eslint requires that --fix be set before the --ext argument.
if fix:
cmd_args.insert(2, '--fix')
cmd_args.insert(2, "--fix")
shell = False
if os.environ.get('MSYSTEM') in ('MINGW32', 'MINGW64'):
if os.environ.get("MSYSTEM") in ("MINGW32", "MINGW64"):
# The eslint binary needs to be run from a shell with msys
shell = True
encoding = 'utf-8'
encoding = "utf-8"
orig = signal.signal(signal.SIGINT, signal.SIG_IGN)
proc = subprocess.Popen(cmd_args,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc = subprocess.Popen(
cmd_args, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
signal.signal(signal.SIGINT, orig)
try:
@@ -118,16 +127,18 @@ def lint(paths, config, binary=None, fix=None, setup=None, **lintargs):
results = []
for obj in jsonresult:
errors = obj['messages']
errors = obj["messages"]
for err in errors:
err.update({
'hint': err.get('fix'),
'level': 'error' if err['severity'] == 2 else 'warning',
'lineno': err.get('line') or 0,
'path': obj['filePath'],
'rule': err.get('ruleId'),
})
err.update(
{
"hint": err.get("fix"),
"level": "error" if err["severity"] == 2 else "warning",
"lineno": err.get("line") or 0,
"path": obj["filePath"],
"rule": err.get("ruleId"),
}
)
results.append(result.from_config(config, **err))
return results
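For reference, each entry of the eslint JSON output consumed above looks roughly like this (abridged; values invented):

jsonresult = [
    {
        "filePath": "/checkout/browser/foo.js",
        "messages": [
            {
                "ruleId": "no-unused-vars",
                "severity": 2,
                "message": "'x' is defined but never used.",
                "line": 10,
                "fix": {"range": [120, 125], "text": ""},
            },
        ],
    },
]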

View file

@@ -13,8 +13,12 @@ import sys
from distutils.version import LooseVersion
from filecmp import dircmp
from mozbuild.nodeutil import (find_node_executable, find_npm_executable,
NPM_MIN_VERSION, NODE_MIN_VERSION)
from mozbuild.nodeutil import (
find_node_executable,
find_npm_executable,
NPM_MIN_VERSION,
NODE_MIN_VERSION,
)
from mozbuild.util import ensure_subprocess_env
from mozfile.mozfile import remove as mozfileremove
@@ -70,11 +74,16 @@ def eslint_setup(should_clobber=False):
guide you through an interactive wizard helping you configure
eslint for optimal use on Mozilla projects.
"""
package_setup(get_project_root(), 'eslint', should_clobber=should_clobber)
package_setup(get_project_root(), "eslint", should_clobber=should_clobber)
def package_setup(package_root, package_name, should_update=False, should_clobber=False,
no_optional=False):
def package_setup(
package_root,
package_name,
should_update=False,
should_clobber=False,
no_optional=False,
):
"""Ensure `package_name` at `package_root` is installed.
When `should_update` is true, clobber, install, and produce a new
@@ -101,8 +110,8 @@ def package_setup(package_root, package_name, should_update=False, should_clobbe
if should_clobber:
node_modules_path = os.path.join(project_root, "node_modules")
print("Clobbering %s..." % node_modules_path)
if sys.platform.startswith('win') and have_winrm():
process = subprocess.Popen(['winrm', '-rf', node_modules_path])
if sys.platform.startswith("win") and have_winrm():
process = subprocess.Popen(["winrm", "-rf", node_modules_path])
process.wait()
else:
mozfileremove(node_modules_path)
@@ -118,7 +127,7 @@ def package_setup(package_root, package_name, should_update=False, should_clobbe
extra_parameters = ["--loglevel=error"]
if no_optional:
extra_parameters.append('--no-optional')
extra_parameters.append("--no-optional")
package_lock_json_path = os.path.join(get_project_root(), "package-lock.json")
@@ -140,13 +149,15 @@ def package_setup(package_root, package_name, should_update=False, should_clobbe
cmd.extend(extra_parameters)
print("Installing %s for mach using \"%s\"..." % (package_name, " ".join(cmd)))
print('Installing %s for mach using "%s"...' % (package_name, " ".join(cmd)))
result = call_process(package_name, cmd)
if not result:
return 1
bin_path = os.path.join(get_project_root(), "node_modules", ".bin", package_name)
bin_path = os.path.join(
get_project_root(), "node_modules", ".bin", package_name
)
print("\n%s installed successfully!" % package_name)
print("\nNOTE: Your local %s binary is at %s\n" % (package_name, bin_path))
@@ -184,15 +195,17 @@ def expected_eslint_modules():
# Also read the in-tree ESLint plugin mozilla information, to ensure the
# dependencies are up to date.
mozilla_json_path = os.path.join(get_eslint_module_path(),
"eslint-plugin-mozilla", "package.json")
mozilla_json_path = os.path.join(
get_eslint_module_path(), "eslint-plugin-mozilla", "package.json"
)
with open(mozilla_json_path, "r", encoding="utf-8") as f:
expected_modules.update(json.load(f)["dependencies"])
# Also read the in-tree ESLint plugin spidermonkey information, to ensure the
# dependencies are up to date.
mozilla_json_path = os.path.join(get_eslint_module_path(),
"eslint-plugin-spidermonkey-js", "package.json")
mozilla_json_path = os.path.join(
get_eslint_module_path(), "eslint-plugin-spidermonkey-js", "package.json"
)
with open(mozilla_json_path, "r", encoding="utf-8") as f:
expected_modules.update(json.load(f)["dependencies"])
@@ -204,7 +217,7 @@ def check_eslint_files(node_modules_path, name):
# Diff files only looks at files that are different. Not for files
# that are only present on one side. This should be generally OK as
# new files will need to be added in the index.js for the package.
if dcmp.diff_files and dcmp.diff_files != ['package.json']:
if dcmp.diff_files and dcmp.diff_files != ["package.json"]:
return True
result = False
@@ -216,8 +229,10 @@ def check_eslint_files(node_modules_path, name):
return result
dcmp = dircmp(os.path.join(node_modules_path, name),
os.path.join(get_eslint_module_path(), name))
dcmp = dircmp(
os.path.join(node_modules_path, name),
os.path.join(get_eslint_module_path(), name),
)
return check_file_diffs(dcmp)
@@ -299,18 +314,21 @@ def get_possible_node_paths_win():
if platform.system() != "Windows":
return []
return list({
"%s\\nodejs" % os.environ.get("SystemDrive"),
os.path.join(os.environ.get("ProgramFiles"), "nodejs"),
os.path.join(os.environ.get("PROGRAMW6432"), "nodejs"),
os.path.join(os.environ.get("PROGRAMFILES"), "nodejs")
})
return list(
{
"%s\\nodejs" % os.environ.get("SystemDrive"),
os.path.join(os.environ.get("ProgramFiles"), "nodejs"),
os.path.join(os.environ.get("PROGRAMW6432"), "nodejs"),
os.path.join(os.environ.get("PROGRAMFILES"), "nodejs"),
}
)
def get_version(path):
try:
version_str = subprocess.check_output([path, "--version"], stderr=subprocess.STDOUT,
universal_newlines=True)
version_str = subprocess.check_output(
[path, "--version"], stderr=subprocess.STDOUT, universal_newlines=True
)
return version_str
except (subprocess.CalledProcessError, OSError):
return None
@@ -332,8 +350,8 @@ def set_project_root(root=None):
file_found = False
folder = os.getcwd()
while (folder):
if os.path.exists(os.path.join(folder, 'mach')):
while folder:
if os.path.exists(os.path.join(folder, "mach")):
file_found = True
break
else:
@@ -382,9 +400,9 @@ def check_node_executables_valid():
def have_winrm():
# `winrm -h` should print 'winrm version ...' and exit 1
try:
p = subprocess.Popen(['winrm.exe', '-h'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return p.wait() == 1 and p.stdout.read().startswith('winrm')
p = subprocess.Popen(
["winrm.exe", "-h"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
return p.wait() == 1 and p.stdout.read().startswith("winrm")
except Exception:
return False

View file

@@ -13,15 +13,15 @@ results = []
def lint(paths, config, fix=None, **lintargs):
if platform.system() == 'Windows':
if platform.system() == "Windows":
# Windows doesn't have permissions in files
# Exit now
return results
files = list(expand_exclusions(paths, config, lintargs['root']))
files = list(expand_exclusions(paths, config, lintargs["root"]))
for f in files:
if os.access(f, os.X_OK):
with open(f, 'r+') as content:
with open(f, "r+") as content:
# Some source files have +x permissions
line = content.readline()
if line.startswith("#!"):
@@ -34,9 +34,10 @@ def lint(paths, config, fix=None, **lintargs):
os.chmod(f, 0o644)
continue
res = {'path': f,
'message': "Execution permissions on a source file",
'level': 'error'
}
res = {
"path": f,
"message": "Execution permissions on a source file",
"level": "error",
}
results.append(result.from_config(config, **res))
return results

View file

@@ -9,10 +9,10 @@ results = []
def lint(paths, config, fix=None, **lintargs):
files = list(expand_exclusions(paths, config, lintargs['root']))
files = list(expand_exclusions(paths, config, lintargs["root"]))
for f in files:
with open(f, 'rb') as open_file:
with open(f, "rb") as open_file:
hasFix = False
content_to_write = []
for i, line in enumerate(open_file):
@@ -23,18 +23,19 @@ def lint(paths, config, fix=None, **lintargs):
content_to_write.append(line.rstrip() + b"\n")
hasFix = True
else:
res = {'path': f,
'message': "Trailing whitespace",
'level': 'error',
'lineno': i + 1,
}
res = {
"path": f,
"message": "Trailing whitespace",
"level": "error",
"lineno": i + 1,
}
results.append(result.from_config(config, **res))
else:
if fix:
content_to_write.append(line)
if hasFix:
# Only update the file when we found a change to make
with open(f, 'wb') as open_file_to_write:
with open(f, "wb") as open_file_to_write:
open_file_to_write.write(b"".join(content_to_write))
# We are still using the same fp, let's return to the first
@@ -47,14 +48,15 @@ def lint(paths, config, fix=None, **lintargs):
if b"\r\n" in content:
if fix:
# replace \r\n by \n
content = content.replace(b'\r\n', b'\n')
with open(f, 'wb') as open_file_to_write:
content = content.replace(b"\r\n", b"\n")
with open(f, "wb") as open_file_to_write:
open_file_to_write.write(content)
else:
res = {'path': f,
'message': "Windows line return",
'level': 'error'
}
res = {
"path": f,
"message": "Windows line return",
"level": "error",
}
results.append(result.from_config(config, **res))
return results

View file

@@ -27,35 +27,35 @@ def run_process(cmd):
def run_mozlint(hooktype, args):
# --quiet prevents warnings on eslint, it will be ignored by other linters
python = find_executable('python3')
python = find_executable("python3")
if not python:
print("error: Python 3 not detected on your system! Please install it.")
sys.exit(1)
cmd = [python, os.path.join(topsrcdir, 'mach'), 'lint', '--quiet']
cmd = [python, os.path.join(topsrcdir, "mach"), "lint", "--quiet"]
if 'commit' in hooktype:
if "commit" in hooktype:
# don't prevent commits, just display the lint results
run_process(cmd + ['--workdir=staged'])
run_process(cmd + ["--workdir=staged"])
return False
elif 'push' in hooktype:
return run_process(cmd + ['--outgoing'] + args)
elif "push" in hooktype:
return run_process(cmd + ["--outgoing"] + args)
print("warning: '{}' is not a valid mozlint hooktype".format(hooktype))
return False
def hg(ui, repo, **kwargs):
hooktype = kwargs['hooktype']
return run_mozlint(hooktype, kwargs.get('pats', []))
hooktype = kwargs["hooktype"]
return run_mozlint(hooktype, kwargs.get("pats", []))
def git():
hooktype = os.path.basename(__file__)
if hooktype == 'hooks.py':
hooktype = 'pre-push'
if hooktype == "hooks.py":
hooktype = "pre-push"
return run_mozlint(hooktype, [])
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(git())
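For context, a sketch of how these entry points are usually wired up (paths illustrative): Mercurial users enable hg() from an .hgrc entry such as

[hooks]
pre-push.lint = python:/path/to/mozilla-central/tools/lint/hooks.py:hg

while git users symlink this file to .git/hooks/pre-push, so git() derives the hooktype from the script's basename.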

View file

@@ -11,7 +11,11 @@ import sys
here = os.path.dirname(os.path.realpath(__file__))
topsrcdir = os.path.join(here, os.pardir, os.pardir)
EXTRA_PATHS = ("python/mozversioncontrol", "python/mozbuild", "testing/mozbase/mozfile",)
EXTRA_PATHS = (
"python/mozversioncontrol",
"python/mozbuild",
"testing/mozbase/mozfile",
)
sys.path[:0] = [os.path.join(topsrcdir, p) for p in EXTRA_PATHS]
from mozversioncontrol import get_repository_object, InvalidRepoPath

View file

@@ -11,7 +11,11 @@ import sys
here = os.path.dirname(os.path.realpath(__file__))
topsrcdir = os.path.join(here, os.pardir, os.pardir)
EXTRA_PATHS = ("python/mozversioncontrol", "python/mozbuild", "testing/mozbase/mozfile",)
EXTRA_PATHS = (
"python/mozversioncontrol",
"python/mozbuild",
"testing/mozbase/mozfile",
)
sys.path[:0] = [os.path.join(topsrcdir, p) for p in EXTRA_PATHS]
from mozversioncontrol import get_repository_object, InvalidRepoPath

View file

@@ -16,14 +16,14 @@ from mozlint.pathutils import expand_exclusions
# If for any reason a pref needs to appear in both files, add it to this set.
IGNORE_PREFS = {
'devtools.console.stdout.chrome', # Uses the 'sticky' attribute.
'devtools.console.stdout.content', # Uses the 'sticky' attribute.
'fission.autostart', # Uses the 'locked' attribute.
'browser.dom.window.dump.enabled', # Uses the 'sticky' attribute.
'apz.fling_curve_function_y2', # This pref is a part of a series.
'dom.postMessage.sharedArrayBuffer.bypassCOOP_COEP.insecure.enabled', # NOQA: E501; Uses the 'locked' attribute.
"devtools.console.stdout.chrome", # Uses the 'sticky' attribute.
"devtools.console.stdout.content", # Uses the 'sticky' attribute.
"fission.autostart", # Uses the 'locked' attribute.
"browser.dom.window.dump.enabled", # Uses the 'sticky' attribute.
"apz.fling_curve_function_y2", # This pref is a part of a series.
"dom.postMessage.sharedArrayBuffer.bypassCOOP_COEP.insecure.enabled", # NOQA: E501; Uses the 'locked' attribute.
}
PATTERN = re.compile(r'\s*pref\(\s*\"(?P<pref>.+)\"\s*,\s*(?P<val>.+)\)\s*;.*')
PATTERN = re.compile(r"\s*pref\(\s*\"(?P<pref>.+)\"\s*,\s*(?P<val>.+)\)\s*;.*")
def get_names(pref_list_filename):
@@ -32,17 +32,16 @@ def get_names(pref_list_filename):
# pattern does not happen in 'name', so it's fine to ignore these.
# We also want to evaluate all branches of #ifdefs for pref names, so we
# ignore anything else preprocessor related.
file = open(pref_list_filename).read().replace('@', '')
file = open(pref_list_filename).read().replace("@", "")
try:
pref_list = yaml.safe_load(file)
except (IOError, ValueError) as e:
print('{}: error:\n {}'
.format(pref_list_filename, e), file=sys.stderr)
print("{}: error:\n {}".format(pref_list_filename, e), file=sys.stderr)
sys.exit(1)
for pref in pref_list:
if pref['name'] not in IGNORE_PREFS:
pref_names[pref['name']] = pref['value']
if pref["name"] not in IGNORE_PREFS:
pref_names[pref["name"]] = pref["value"]
return pref_names
@@ -53,21 +52,23 @@ def check_against(path, pref_names):
errors = []
prefs = read_prefs(path)
for pref in prefs:
if pref['name'] in pref_names:
errors.extend(check_value_for_pref(pref, pref_names[pref['name']], path))
if pref["name"] in pref_names:
errors.extend(check_value_for_pref(pref, pref_names[pref["name"]], path))
return errors
def check_value_for_pref(some_pref, some_value, path):
errors = []
if some_pref['value'] == some_value:
errors.append({
'path': path,
'message': some_pref['raw'],
'lineno': some_pref['line'],
'hint': 'Remove the duplicate pref or add it to IGNORE_PREFS.',
'level': 'error',
})
if some_pref["value"] == some_value:
errors.append(
{
"path": path,
"message": some_pref["raw"],
"lineno": some_pref["line"],
"hint": "Remove the duplicate pref or add it to IGNORE_PREFS.",
"level": "error",
}
)
return errors
@@ -79,17 +80,19 @@ def read_prefs(path):
for lineno, line in enumerate(source, start=1):
match = PATTERN.match(line)
if match:
prefs.append({
'name': match.group('pref'),
'value': evaluate_pref(match.group('val')),
'line': lineno,
'raw': line
})
prefs.append(
{
"name": match.group("pref"),
"value": evaluate_pref(match.group("val")),
"line": lineno,
"raw": line,
}
)
return prefs
def evaluate_pref(value):
bools = {'true': True, 'false': False}
bools = {"true": True, "false": False}
if value in bools:
return bools[value]
elif value.isdigit():
@@ -100,8 +103,8 @@ def evaluate_pref(value):
def checkdupes(paths, config, **kwargs):
results = []
errors = []
pref_names = get_names(config['support-files'][0])
files = list(expand_exclusions(paths, config, kwargs['root']))
pref_names = get_names(config["support-files"][0])
files = list(expand_exclusions(paths, config, kwargs["root"]))
for file in files:
errors.extend(check_against(file, pref_names))
for error in errors:

View file

@@ -13,19 +13,17 @@ results = []
# Official source: https://www.mozilla.org/en-US/MPL/headers/
TEMPLATES = {
"mpl2_license":
"""
"mpl2_license": """
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
""".strip().splitlines(),
"public_domain_license":
"""
"public_domain_license": """
Any copyright is dedicated to the public domain.
http://creativecommons.org/publicdomain/zero/1.0/
""".strip().splitlines(),
}
license_list = os.path.join(here, 'valid-licenses.txt')
license_list = os.path.join(here, "valid-licenses.txt")
def load_valid_license():
@@ -35,7 +33,7 @@ def load_valid_license():
with open(license_list) as f:
l = f.readlines()
# Remove the empty lines
return list(filter(bool, [x.replace('\n', '') for x in l]))
return list(filter(bool, [x.replace("\n", "") for x in l]))
def is_valid_license(licenses, filename):
@@ -43,7 +41,7 @@ def is_valid_license(licenses, filename):
From a given file, check if we can find the license patterns
in the X first lines of the file
"""
with open(filename, 'r', errors='replace') as myfile:
with open(filename, "r", errors="replace") as myfile:
contents = myfile.read()
# Empty files don't need a license.
if not contents:
@@ -60,7 +58,7 @@ def add_header(filename, header):
Add the header to the top of the file
"""
header.append("\n")
with open(filename, 'r+') as f:
with open(filename, "r+") as f:
# lines in list format
lines = f.readlines()
@@ -85,9 +83,17 @@ def is_test(f):
if "lint/test/" in f:
# For the unit tests
return False
return ("/test" in f or "/gtest" in f or "/crashtest" in f or "/mochitest" in f
or "/reftest" in f or "/imptest" in f or "/androidTest" in f
or "/jit-test/" in f or "jsapi-tests/" in f)
return (
"/test" in f
or "/gtest" in f
or "/crashtest" in f
or "/mochitest" in f
or "/reftest" in f
or "/imptest" in f
or "/androidTest" in f
or "/jit-test/" in f
or "jsapi-tests/" in f
)
def fix_me(filename):
@@ -97,14 +103,26 @@ def fix_me(filename):
_, ext = os.path.splitext(filename)
license = []
license_template = TEMPLATES['mpl2_license']
license_template = TEMPLATES["mpl2_license"]
test = False
if is_test(filename):
license_template = TEMPLATES['public_domain_license']
license_template = TEMPLATES["public_domain_license"]
test = True
if ext in ['.cpp', '.c', '.cc', '.h', '.m', '.mm', '.rs', '.js', '.jsm', '.jsx', '.css']:
if ext in [
".cpp",
".c",
".cc",
".h",
".m",
".mm",
".rs",
".js",
".jsm",
".jsx",
".css",
]:
for i, l in enumerate(license_template):
start = " "
end = ""
@@ -119,13 +137,13 @@ def fix_me(filename):
add_header(filename, license)
return
if ext in ['.py', '.ftl', '.properties'] or filename.endswith(".inc.xul"):
if ext in [".py", ".ftl", ".properties"] or filename.endswith(".inc.xul"):
for l in license_template:
license.append("# " + l.strip() + "\n")
add_header(filename, license)
return
if ext in ['.xml', '.xul', '.html', '.xhtml', '.dtd', '.svg']:
if ext in [".xml", ".xul", ".html", ".xhtml", ".dtd", ".svg"]:
for i, l in enumerate(license_template):
start = " - "
end = ""
@@ -136,7 +154,7 @@ def fix_me(filename):
# Last line, we end by -->
end = " -->"
license.append(start + l.strip() + end)
if ext != '.svg' or end == "":
if ext != ".svg" or end == "":
# When dealing with an svg, we should not have a space between
# the license and the content
license.append("\n")
@ -145,7 +163,7 @@ def fix_me(filename):
def lint(paths, config, fix=None, **lintargs):
files = list(expand_exclusions(paths, config, lintargs['root']))
files = list(expand_exclusions(paths, config, lintargs["root"]))
licenses = load_valid_license()
@@ -154,10 +172,11 @@ def lint(paths, config, fix=None, **lintargs):
# For now, do not do anything with test (too many)
continue
if not is_valid_license(licenses, f):
res = {'path': f,
'message': "No matching license strings found in tools/lint/license/valid-licenses.txt", # noqa
'level': 'error'
}
res = {
"path": f,
"message": "No matching license strings found in tools/lint/license/valid-licenses.txt", # noqa
"level": "error",
}
results.append(result.from_config(config, **res))
if fix:
fix_me(f)

View file

@@ -21,23 +21,24 @@ from mach.decorators import (
here = os.path.abspath(os.path.dirname(__file__))
EXCLUSION_FILES = [
os.path.join('tools', 'rewriting', 'Generated.txt'),
os.path.join('tools', 'rewriting', 'ThirdPartyPaths.txt'),
os.path.join("tools", "rewriting", "Generated.txt"),
os.path.join("tools", "rewriting", "ThirdPartyPaths.txt"),
]
EXCLUSION_FILES_OPTIONAL = []
thunderbird_excludes = os.path.join('comm', 'tools', 'lint', 'GlobalExclude.txt')
thunderbird_excludes = os.path.join("comm", "tools", "lint", "GlobalExclude.txt")
if os.path.exists(thunderbird_excludes):
EXCLUSION_FILES_OPTIONAL.append(thunderbird_excludes)
GLOBAL_EXCLUDES = [
'node_modules',
'tools/lint/test/files',
"node_modules",
"tools/lint/test/files",
]
def setup_argument_parser():
from mozlint import cli
return cli.MozlintParser()
@@ -46,11 +47,16 @@ def get_global_excludes(topsrcdir):
excludes = GLOBAL_EXCLUDES[:]
# exclude top level paths that look like objdirs
excludes.extend([name for name in os.listdir(topsrcdir)
if name.startswith('obj') and os.path.isdir(name)])
excludes.extend(
[
name
for name in os.listdir(topsrcdir)
if name.startswith("obj") and os.path.isdir(name)
]
)
for path in EXCLUSION_FILES + EXCLUSION_FILES_OPTIONAL:
with open(os.path.join(topsrcdir, path), 'r') as fh:
with open(os.path.join(topsrcdir, path), "r") as fh:
excludes.extend([f.strip() for f in fh.readlines()])
return excludes
@@ -58,11 +64,12 @@ def get_global_excludes(topsrcdir):
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
'lint', category='devenv',
description='Run linters.',
parser=setup_argument_parser)
"lint",
category="devenv",
description="Run linters.",
parser=setup_argument_parser,
)
def lint(self, *runargs, **lintargs):
"""Run linters."""
self._activate_virtualenv()
@@ -70,35 +77,58 @@ class MachCommands(MachCommandBase):
try:
buildargs = {}
buildargs['substs'] = copy.deepcopy(dict(self.substs))
buildargs['defines'] = copy.deepcopy(dict(self.defines))
buildargs['topobjdir'] = self.topobjdir
buildargs["substs"] = copy.deepcopy(dict(self.substs))
buildargs["defines"] = copy.deepcopy(dict(self.defines))
buildargs["topobjdir"] = self.topobjdir
lintargs.update(buildargs)
except BuildEnvironmentNotFoundException:
pass
lintargs.setdefault('root', self.topsrcdir)
lintargs['exclude'] = get_global_excludes(lintargs['root'])
lintargs['config_paths'].insert(0, here)
lintargs.setdefault("root", self.topsrcdir)
lintargs["exclude"] = get_global_excludes(lintargs["root"])
lintargs["config_paths"].insert(0, here)
for path in EXCLUSION_FILES:
parser.GLOBAL_SUPPORT_FILES.append(os.path.join(self.topsrcdir, path))
return cli.run(*runargs, **lintargs)
@Command('eslint', category='devenv',
description='Run eslint or help configure eslint for optimal development.')
@CommandArgument('paths', default=None, nargs='*',
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.")
@CommandArgument('-s', '--setup', default=False, action='store_true',
help='Configure eslint for optimal development.')
@CommandArgument('-b', '--binary', default=None,
help='Path to eslint binary.')
@CommandArgument('--fix', default=False, action='store_true',
help='Request that eslint automatically fix errors, where possible.')
@CommandArgument('extra_args', nargs=argparse.REMAINDER,
help='Extra args that will be forwarded to eslint.')
@Command(
"eslint",
category="devenv",
description="Run eslint or help configure eslint for optimal development.",
)
@CommandArgument(
"paths",
default=None,
nargs="*",
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.",
)
@CommandArgument(
"-s",
"--setup",
default=False,
action="store_true",
help="Configure eslint for optimal development.",
)
@CommandArgument("-b", "--binary", default=None, help="Path to eslint binary.")
@CommandArgument(
"--fix",
default=False,
action="store_true",
help="Request that eslint automatically fix errors, where possible.",
)
@CommandArgument(
"extra_args",
nargs=argparse.REMAINDER,
help="Extra args that will be forwarded to eslint.",
)
def eslint(self, paths, extra_args=[], **kwargs):
self._mach_context.commands.dispatch('lint', self._mach_context,
linters=['eslint'], paths=paths,
argv=extra_args, **kwargs)
self._mach_context.commands.dispatch(
"lint",
self._mach_context,
linters=["eslint"],
paths=paths,
argv=extra_args,
**kwargs
)
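Invocation is unchanged by the reformat; for example (paths illustrative):

./mach eslint -s                  # configure eslint for development
./mach eslint --fix browser/base  # dispatched to `mach lint -l eslint`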

View file

@@ -7,7 +7,7 @@ from perfdocs import perfdocs
from mozlint.util import pip
here = os.path.abspath(os.path.dirname(__file__))
PERFDOCS_REQUIREMENTS_PATH = os.path.join(here, 'requirements.txt')
PERFDOCS_REQUIREMENTS_PATH = os.path.join(here, "requirements.txt")
def setup(root, **lintargs):
@@ -17,6 +17,4 @@ def setup(root, **lintargs):
def lint(paths, config, logger, fix=False, **lintargs):
return perfdocs.run_perfdocs(
config, logger=logger, paths=paths, generate=fix
)
return perfdocs.run_perfdocs(config, logger=logger, paths=paths, generate=fix)

View file

@@ -9,48 +9,46 @@ import re
from perfdocs.utils import read_yaml
from manifestparser import TestManifest
'''
"""
This file is for framework specific gatherers since manifests
might be parsed differently in each of them. The gatherers
must implement the FrameworkGatherer class.
'''
"""
class FrameworkGatherer(object):
'''
"""
Abstract class for framework gatherers.
'''
"""
def __init__(self, yaml_path, workspace_dir):
'''
"""
Generic initialization for a framework gatherer.
'''
"""
self.workspace_dir = workspace_dir
self._yaml_path = yaml_path
self._suite_list = {}
self._test_list = {}
self._manifest_path = ''
self._manifest_path = ""
self._manifest = None
def get_manifest_path(self):
'''
"""
Returns the path to the manifest based on the
manifest entry in the frameworks YAML configuration
file.
:return str: Path to the manifest.
'''
"""
if self._manifest_path:
return self._manifest_path
yaml_content = read_yaml(self._yaml_path)
self._manifest_path = os.path.join(
self.workspace_dir, yaml_content["manifest"]
)
self._manifest_path = os.path.join(self.workspace_dir, yaml_content["manifest"])
return self._manifest_path
def get_suite_list(self):
'''
"""
Each framework gatherer must return a dictionary with
the following structure. Note that the test names must
be relative paths so that issues can be correctly issued
@@ -62,32 +60,29 @@ class FrameworkGatherer(object):
'testing/raptor/test2'
]
}
'''
"""
raise NotImplementedError
def _build_section_with_header(self, title, content, header_type=None):
'''
"""
Adds a section to the documentation with the title as the type mentioned
and paragraph as content mentioned.
:param title: title of the section
:param content: content of section paragraph
:param documentation: documentation object to add section to
:param type: type of the title heading
'''
heading_map = {
'H4': '-',
'H5': '^'
}
return [title, heading_map.get(type, '^') * len(title), content, '']
"""
heading_map = {"H4": "-", "H5": "^"}
return [title, heading_map.get(type, "^") * len(title), content, ""]
class RaptorGatherer(FrameworkGatherer):
'''
"""
Gatherer for the Raptor framework.
'''
"""
def get_suite_list(self):
'''
"""
Returns a dictionary containing a mapping from suites
to the tests they contain.
@@ -97,7 +92,7 @@ class RaptorGatherer(FrameworkGatherer):
'testing/raptor/test2'
]
}
'''
"""
if self._suite_list:
return self._suite_list
@@ -115,7 +110,7 @@ class RaptorGatherer(FrameworkGatherer):
self._suite_list[s] = []
# Get the individual test
fpath = re.sub(".*testing", "testing", test['manifest'])
fpath = re.sub(".*testing", "testing", test["manifest"])
if fpath not in self._suite_list[s]:
self._suite_list[s].append(fpath)
@@ -123,12 +118,12 @@ class RaptorGatherer(FrameworkGatherer):
return self._suite_list
def _get_subtests_from_ini(self, manifest_path):
'''
"""
Returns a list of (sub)tests from an ini file containing the test definitions.
:param str manifest_path: path to the ini file
:return list: the list of the tests
'''
"""
test_manifest = TestManifest([manifest_path], strict=False)
test_list = test_manifest.active_tests(exists=False, disabled=False)
subtest_list = {subtest["name"]: subtest["manifest"] for subtest in test_list}
@@ -136,7 +131,7 @@ class RaptorGatherer(FrameworkGatherer):
return subtest_list
def get_test_list(self):
'''
"""
Returns a dictionary containing the tests in every suite ini file.
:return dict: A dictionary with the following structure: {
@@ -145,7 +140,7 @@ class RaptorGatherer(FrameworkGatherer):
'raptor_test2'
]
}
'''
"""
if self._test_list:
return self._test_list

View file

@@ -18,18 +18,18 @@ frameworks = {
class Gatherer(object):
'''
"""
Gatherer produces the tree of the perfdoc's entries found
and can obtain manifest-based test lists. Used by the Verifier.
'''
"""
def __init__(self, root_dir, workspace_dir):
'''
"""
Initialize the Gatherer.
:param str root_dir: Path to the testing directory.
:param str workspace_dir: Path to the gecko checkout.
'''
"""
self.root_dir = root_dir
self.workspace_dir = workspace_dir
self._perfdocs_tree = []
@@ -38,14 +38,14 @@ class Gatherer(object):
@property
def perfdocs_tree(self):
'''
"""
Returns the perfdocs_tree, and computes it
if it doesn't exist.
:return dict: The perfdocs tree containing all
framework perfdoc entries. See `fetch_perfdocs_tree`
for information on the data structure.
'''
"""
if self._perfdocs_tree:
return self._perfdocs_tree
else:
@@ -53,7 +53,7 @@ class Gatherer(object):
return self._perfdocs_tree
def fetch_perfdocs_tree(self):
'''
"""
Creates the perfdocs tree with the following structure:
[
{
@@ -66,11 +66,11 @@ class Gatherer(object):
This method doesn't return anything. The result can be found in
the perfdocs_tree attribute.
'''
"""
for dirpath, dirname, files in os.walk(self.root_dir):
# Walk through the testing directory tree
if dirpath.endswith('/perfdocs'):
if dirpath.endswith("/perfdocs"):
matched = {"path": dirpath, "yml": "", "rst": "", "static": []}
for file in files:
# Add the yml/rst/static file to its key if re finds the searched file
@@ -85,11 +85,14 @@ class Gatherer(object):
if all(val for val in matched.values() if not type(val) == list):
self._perfdocs_tree.append(matched)
logger.log("Found {} perfdocs directories in {}"
.format(len(self._perfdocs_tree), self.root_dir))
logger.log(
"Found {} perfdocs directories in {}".format(
len(self._perfdocs_tree), self.root_dir
)
)
def get_test_list(self, sdt_entry):
'''
"""
Use a perfdocs_tree entry to find the test list for
the framework that was found.
@@ -99,28 +102,29 @@ class Gatherer(object):
'name': Name of framework,
'test_list': Test list found for the framework
}
'''
"""
# If it was computed before, return it
yaml_path = os.path.join(sdt_entry["path"], sdt_entry['yml'])
yaml_path = os.path.join(sdt_entry["path"], sdt_entry["yml"])
for entry in self._test_list:
if entry['yml_path'] == yaml_path:
if entry["yml_path"] == yaml_path:
return entry
# Set up framework entry with meta data
yaml_content = read_yaml(yaml_path)
framework = {
'yml_content': yaml_content,
'yml_path': yaml_path,
'name': yaml_content["name"],
"yml_content": yaml_content,
"yml_path": yaml_path,
"name": yaml_content["name"],
}
# Get and then store the frameworks tests
self.framework_gatherers[framework["name"]] = frameworks[framework["name"]](
framework["yml_path"],
self.workspace_dir
framework["yml_path"], self.workspace_dir
)
framework["test_list"] = self.framework_gatherers[framework["name"]].get_test_list()
framework["test_list"] = self.framework_gatherers[
framework["name"]
].get_test_list()
self._test_list.append(framework)
return framework

View file

@@ -9,47 +9,45 @@ import shutil
import tempfile
from perfdocs.logger import PerfDocLogger
from perfdocs.utils import (
are_dirs_equal,
read_file,
read_yaml,
save_file
)
from perfdocs.utils import are_dirs_equal, read_file, read_yaml, save_file
logger = PerfDocLogger()
class Generator(object):
'''
"""
After each perfdocs directory was validated, the generator uses the templates
for each framework, fills them with the test descriptions in config and saves
the perfdocs in the form index.rst as index file and suite_name.rst for
each suite of tests in the framework.
'''
"""
def __init__(self, verifier, workspace, generate=False):
'''
"""
Initialize the Generator.
:param verifier: Verifier object. It should not be a fresh Verifier object,
but an initialized one with validate_tree() method already called
:param workspace: Path to the top-level checkout directory.
:param generate: Flag for generating the documentation
'''
"""
self._workspace = workspace
if not self._workspace:
raise Exception("PerfDocs Generator requires a workspace directory.")
# Template documents without added information reside here
self.templates_path = os.path.join(
self._workspace, 'tools', 'lint', 'perfdocs', 'templates')
self._workspace, "tools", "lint", "perfdocs", "templates"
)
self.perfdocs_path = os.path.join(
self._workspace, 'testing', 'perfdocs', 'generated')
self._workspace, "testing", "perfdocs", "generated"
)
self._generate = generate
self._verifier = verifier
self._perfdocs_tree = self._verifier._gatherer.perfdocs_tree
def build_perfdocs_from_tree(self):
'''
"""
Builds up a document for each framework that was found.
:return dict: A dictionary containing a mapping from each framework
@@ -58,83 +56,84 @@ class Generator(object):
framework_name: framework_document,
...
}
'''
"""
def _append_rst_section(title, content, documentation, type=None):
'''
"""
Adds a section to the documentation with the title as the type mentioned
and paragraph as content mentioned.
:param title: title of the section
:param content: content of section paragraph
:param documentation: documentation object to add section to
:param type: type of the title heading
'''
heading_map = {
'H4': '-',
'H5': '^'
}
heading_symbol = heading_map.get(type, '-')
documentation.extend([title, heading_symbol * len(title), content, ''])
"""
heading_map = {"H4": "-", "H5": "^"}
heading_symbol = heading_map.get(type, "-")
documentation.extend([title, heading_symbol * len(title), content, ""])
# Using the verified `perfdocs_tree`, build up the documentation.
frameworks_info = {}
for framework in self._perfdocs_tree:
yaml_content = read_yaml(os.path.join(framework['path'], framework['yml']))
yaml_content = read_yaml(os.path.join(framework["path"], framework["yml"]))
rst_content = read_file(
os.path.join(framework['path'], framework['rst']),
stringify=True)
os.path.join(framework["path"], framework["rst"]), stringify=True
)
# Gather all tests and descriptions and format them into
# documentation content
documentation = []
suites = yaml_content['suites']
suites = yaml_content["suites"]
for suite_name in sorted(suites.keys()):
suite_info = suites[suite_name]
# Add the suite with an H4 heading
_append_rst_section(
suite_name.capitalize(),
suite_info["description"],
documentation,
type="H4",
)
tests = suite_info.get("tests", {})
for test_name in sorted(tests.keys()):
documentation.extend(
self._verifier._gatherer.framework_gatherers[
yaml_content["name"]
].build_test_description(test_name, tests[test_name])
)
documentation.append("")
# Insert documentation into `.rst` file
framework_rst = re.sub(
r"{documentation}", os.linesep.join(documentation), rst_content
)
frameworks_info[yaml_content["name"]] = {
"dynamic": framework_rst,
"static": [],
}
# For static `.rst` file
for static_file in framework["static"]:
frameworks_info[yaml_content["name"]]["static"].append(
{
"file": static_file,
"content": read_file(
os.path.join(framework["path"], static_file), stringify=True
),
}
)
return frameworks_info
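For reference, the mapping returned here has the following shape; the framework name, file name, and contents below are illustrative placeholders only:

frameworks_info = {
    "raptor": {
        # Full .rst text with {documentation} replaced by the
        # generated suite/test sections.
        "dynamic": "Raptor\n======\n...",
        # One entry per static file found next to the framework's
        # perfdocs configuration.
        "static": [{"file": "extra.rst", "content": "..."}],
    }
}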
def _create_temp_dir(self):
"""
Create a temp directory as preparation of saving the documentation tree.
:return str: The location of the created perfdocs_tmpdir.
"""
# Build the directory that will contain the final result (a tmp dir
# that will be moved to the final location afterwards)
try:
tmpdir = tempfile.mkdtemp()
perfdocs_tmpdir = os.path.join(tmpdir, "generated")
os.mkdir(perfdocs_tmpdir)
except OSError as e:
logger.critical("Error creating temp file: {}".format(e))
@ -146,10 +145,10 @@ class Generator(object):
return success
def _create_perfdocs(self):
"""
Creates the perfdocs documentation.
:return: str path of the temp dir it is saved in
"""
# All directories that are kept in the perfdocs tree are valid,
# so use it to build up the documentation.
framework_docs = self.build_perfdocs_from_tree()
@ -161,29 +160,33 @@ class Generator(object):
frameworks.append(framework_name)
save_file(
framework_docs[framework_name]["dynamic"],
os.path.join(perfdocs_tmpdir, framework_name),
)
for static_name in framework_docs[framework_name]["static"]:
save_file(
static_name["content"],
os.path.join(perfdocs_tmpdir, static_name["file"].split(".")[0]),
)
# Get the main page and add the framework links to it
mainpage = read_file(
os.path.join(self.templates_path, "index.rst"), stringify=True
)
fmt_frameworks = os.linesep.join(
[" * :doc:`%s`" % name for name in frameworks]
)
fmt_mainpage = re.sub(r"{test_documentation}", fmt_frameworks, mainpage)
save_file(fmt_mainpage, os.path.join(perfdocs_tmpdir, "index"))
return perfdocs_tmpdir
def _save_perfdocs(self, perfdocs_tmpdir):
"""
Copies the generated perfdocs tree from the temp directory into
its final location.
:param perfdocs_tmpdir: str location of the temp dir where the
perfdocs were saved
"""
# Remove the old docs and copy the new version there without
# checking if they need to be regenerated.
logger.log("Regenerating perfdocs...")
@ -194,13 +197,18 @@ class Generator(object):
try:
saved = shutil.copytree(perfdocs_tmpdir, self.perfdocs_path)
if saved:
logger.log(
"Documentation saved to {}/".format(
re.sub(".*testing", "testing", self.perfdocs_path)
)
)
except Exception as e:
logger.critical(
"There was an error while saving the documentation: {}".format(e)
)
def generate_perfdocs(self):
"""
Generate the performance documentation.
If `self._generate` is True, then the documentation will be regenerated
@ -212,21 +220,23 @@ class Generator(object):
docs were regenerated. If `self._generate` is False, then True will mean
that the docs should be regenerated, and False means that they do not
need to be regenerated.
"""
def get_possibly_changed_files():
"""
Returns files that might have been modified
(used to output a linter warning for regeneration)
:return: list - files that might have been modified
"""
files = []
for entry in self._perfdocs_tree:
files.extend(
[
os.path.join(entry["path"], entry["yml"]),
os.path.join(entry["path"], entry["rst"]),
]
)
return files
@ -235,8 +245,7 @@ class Generator(object):
# If they don't exist and we are not generating, then throw
# a linting error and exit.
logger.warning(
"PerfDocs need to be regenerated.",
files=get_possibly_changed_files()
"PerfDocs need to be regenerated.", files=get_possibly_changed_files()
)
return True
@ -249,5 +258,5 @@ class Generator(object):
if not are_dirs_equal(perfdocs_tmpdir, self.perfdocs_path):
logger.warning(
"PerfDocs are outdated, run ./mach lint -l perfdocs --fix` to update them.",
files=get_possibly_changed_files(),
)


@ -7,16 +7,17 @@ import re
class PerfDocLogger(object):
"""
Logger for the PerfDoc tooling. Handles warnings by outputting
them through the StructuredLogger provided by lint.
"""
PATHS = []
LOGGER = None
FAILED = False
def __init__(self):
"""Initializes the PerfDocLogger."""
# Set up class attributes for all logger instances
if not PerfDocLogger.LOGGER:
@ -24,28 +25,26 @@ class PerfDocLogger(object):
"Missing linting LOGGER instance for PerfDocLogger initialization"
)
if not PerfDocLogger.PATHS:
raise Exception("Missing PATHS for PerfDocLogger initialization")
self.logger = PerfDocLogger.LOGGER
def log(self, msg):
"""
Log an info message.
:param str msg: Message to log.
"""
self.logger.info(msg)
def warning(self, msg, files):
"""
Logs a validation warning message. The warning message is
used as the error message that is output in the reviewbot.
:param str msg: Message to log, it's also used as the error message
for the issue that is output by the reviewbot.
:param list/str files: The file(s) that this warning is about.
"""
if type(files) != list:
files = [files]
@ -67,17 +66,17 @@ class PerfDocLogger(object):
lineno=0,
column=None,
path=fpath,
linter="perfdocs",
rule="Flawless performance docs.",
)
PerfDocLogger.FAILED = True
break
def critical(self, msg):
"""
Log a critical message.
:param str msg: Message to log.
"""
self.logger.critical(msg)


@ -8,7 +8,7 @@ import re
def run_perfdocs(config, logger=None, paths=None, generate=True):
"""
Build up performance testing documentation dynamically by combining
text data from YAML files that reside in `perfdoc` folders
across the `testing` directory. Each directory is expected to have
@ -42,13 +42,13 @@ def run_perfdocs(config, logger=None, paths=None, generate=True):
:param list paths: The paths that are being tested. Used to filter
out errors from files outside of these paths.
:param bool generate: If true, the docs will be (re)generated.
"""
from perfdocs.logger import PerfDocLogger
top_dir = os.environ.get("WORKSPACE", None)
if not top_dir:
floc = os.path.abspath(__file__)
top_dir = floc.split("tools")[0]
PerfDocLogger.LOGGER = logger
# Convert all the paths to relative ones
@ -56,7 +56,7 @@ def run_perfdocs(config, logger=None, paths=None, generate=True):
PerfDocLogger.PATHS = rel_paths
# TODO: Expand search to entire tree rather than just the testing directory
testing_dir = os.path.join(top_dir, "testing")
if not os.path.exists(testing_dir):
raise Exception("Cannot locate testing directory at %s" % testing_dir)


@ -12,50 +12,48 @@ logger = PerfDocLogger()
def save_file(file_content, path, extension="rst"):
"""
Saves data into a file.
:param str path: Location and name of the file being saved
(without an extension).
:param str file_content: Content to write into the file.
:param str extension: Extension to save the file as.
"""
with open("{}.{}".format(path, extension), "w") as f:
f.write(file_content)
def read_file(path, stringify=False):
"""
Opens a file and returns its contents.
:param str path: Path to the file.
:param bool stringify: If True, return the contents as a single string.
:return: List of the lines in the file, or the full contents as a
string when stringify is True.
"""
with open(path, "r") as f:
return f.read() if stringify else f.readlines()
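A quick round-trip sketch of the two helpers above, assuming perfdocs.utils is importable from the tree; the path is hypothetical, and save_file appends the extension:

from perfdocs.utils import read_file, save_file

save_file("Hello perfdocs", "/tmp/perfdocs_demo")  # writes /tmp/perfdocs_demo.rst
print(read_file("/tmp/perfdocs_demo.rst", stringify=True))  # Hello perfdocs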
def read_yaml(yaml_path):
"""
Opens a YAML file and returns the contents.
:param str yaml_path: Path to the YAML to open.
:return dict: Dictionary containing the YAML content.
"""
contents = {}
try:
with open(yaml_path, "r") as f:
contents = yaml.safe_load(f)
except Exception as e:
logger.warning("Error opening file {}: {}".format(yaml_path, str(e)), yaml_path)
return contents
def are_dirs_equal(dir_1, dir_2):
"""
Compare two directories to see if they are equal. Files in each
directory are assumed to be equal if their names and contents
are equal.
@ -63,7 +61,7 @@ def are_dirs_equal(dir_1, dir_2):
:param dir_1: First directory path
:param dir_2: Second directory path
:return: True if the directory trees are the same and False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir_1, dir_2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
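The comparison is built on the standard library's filecmp; a minimal standalone sketch of the same check, with assumed directory names:

import filecmp

cmp = filecmp.dircmp("generated_old", "generated_new")
# Names present on only one side, or files that could not be
# compared, already mean the trees are not equal.
if cmp.left_only or cmp.right_only or cmp.funny_files:
    print("trees differ")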


@ -13,7 +13,7 @@ from perfdocs.gatherer import Gatherer
logger = PerfDocLogger()
"""
Schema for the config.yml file.
Expecting a YAML file with a format such as this:
@ -31,7 +31,7 @@ suites:
tests:
wasm: "All wasm tests."
"""
CONFIG_SCHEMA = {
"type": "object",
"properties": {
@ -45,46 +45,38 @@ CONFIG_SCHEMA = {
"properties": {
"tests": {
"type": "object",
"properties": {
"test_name": {"type": "string"},
}
"properties": {"test_name": {"type": "string"},},
},
"description": {"type": "string"},
},
"required": [
"description"
]
"required": ["description"],
},
},
},
"required": [
"name",
"manifest",
"suites"
]
"required": ["name", "manifest", "suites"],
}
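A config that satisfies this schema can be checked the same way validate_yaml does further down; every value in this example is fabricated, and CONFIG_SCHEMA is assumed to be in scope from this module:

import jsonschema

example = {
    "name": "example-framework",
    "manifest": "testing/example/example.ini",  # hypothetical path
    "suites": {
        "desktop": {
            "description": "Desktop suite.",
            "tests": {"cold-load": "Page load from a cold start."},
        }
    },
}
# Raises jsonschema.exceptions.ValidationError if a required field is missing.
jsonschema.validate(instance=example, schema=CONFIG_SCHEMA)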
class Verifier(object):
"""
Verifier is used for validating the perfdocs folders/tree. In the future,
the generator will make use of this class to obtain a validated set of
descriptions that can be used to build up a document.
"""
def __init__(self, root_dir, workspace_dir):
"""
Initialize the Verifier.
:param str root_dir: Path to the 'testing' directory.
:param str workspace_dir: Path to the top-level checkout directory.
"""
self.workspace_dir = workspace_dir
self._gatherer = Gatherer(root_dir, workspace_dir)
def validate_descriptions(self, framework_info):
"""
Cross-validate the tests found in the manifests and the YAML
test definitions. This function doesn't return a valid flag. Instead,
the StructDocLogger.VALIDATION_LOG is used to determine validity.
@ -101,12 +93,12 @@ class Verifier(object):
:param dict framework_info: Contains information about the framework. See
`Gatherer.get_test_list` for information about its structure.
"""
yaml_content = framework_info["yml_content"]
# Check for any bad test/suite names in the yaml config file
global_descriptions = {}
for suite, ytests in yaml_content["suites"].items():
# Find the suite, then check against the tests within it
if framework_info["test_list"].get(suite):
global_descriptions[suite] = []
@ -116,7 +108,7 @@ class Verifier(object):
# Suite found - now check for tests in the YAML
# definitions that don't actually exist
ytests = ytests["tests"]
for test_name in ytests:
foundtest = False
for t in framework_info["test_list"][suite]:
@ -137,12 +129,14 @@ class Verifier(object):
"Could not find an existing test for {} - bad test name?".format(
test_name
),
framework_info["yml_path"]
framework_info["yml_path"],
)
else:
logger.warning(
"Could not find an existing suite for {} - bad suite name?".format(suite),
framework_info["yml_path"]
"Could not find an existing suite for {} - bad suite name?".format(
suite
),
framework_info["yml_path"],
)
# Check for any missing tests/suites
@ -151,14 +145,14 @@ class Verifier(object):
# Description doesn't exist for the suite
logger.warning(
"Missing suite description for {}".format(suite),
yaml_content["manifest"],
)
continue
# If only a description is provided for the suite, assume
# that this is a suite-wide description and don't check for
# its tests
stests = yaml_content["suites"][suite].get("tests", None)
if not stests:
continue
@ -197,67 +191,66 @@ class Verifier(object):
for test_name in new_mtests:
logger.warning(
"Could not find a test description for {}".format(test_name),
test_to_manifest[test_name],
)
def validate_yaml(self, yaml_path):
"""
Validate that the YAML file has all the fields that are
required and parse the descriptions into strings in case
some are given as relative file paths.
:param str yaml_path: Path to the YAML to validate.
:return bool: True/False => Passed/Failed Validation
"""
def _get_description(desc):
"""
Recompute the description in case it's a file.
"""
desc_path = os.path.join(self.workspace_dir, desc)
if os.path.exists(desc_path):
with open(desc_path, "r") as f:
desc = f.readlines()
return desc
def _parse_descriptions(content):
for suite, sinfo in content.items():
desc = sinfo["description"]
sinfo["description"] = _get_description(desc)
# It's possible that the suite has no tests and
# only a description. If they exist, then parse them.
if "tests" in sinfo:
for test, desc in sinfo["tests"].items():
sinfo["tests"][test] = _get_description(desc)
valid = False
yaml_content = read_yaml(yaml_path)
try:
jsonschema.validate(instance=yaml_content, schema=CONFIG_SCHEMA)
_parse_descriptions(yaml_content["suites"])
valid = True
except Exception as e:
logger.warning("YAML ValidationError: {}".format(str(e)), yaml_path)
return valid
def validate_rst_content(self, rst_path):
"""
Validate that the index file given has a {documentation} entry
so that the documentation can be inserted there.
:param str rst_path: Path to the RST file.
:return bool: True/False => Passed/Failed Validation
"""
rst_content = read_file(rst_path)
# Check for a {documentation} entry in some line,
# if we can't find one, then the validation fails.
valid = False
docs_match = re.compile(".*{documentation}.*")
for line in rst_content:
if docs_match.search(line):
valid = True
@ -265,36 +258,36 @@ class Verifier(object):
if not valid:
logger.warning(
"Cannot find a '{documentation}' entry in the given index file",
rst_path,
)
return valid
def _check_framework_descriptions(self, item):
"""
Helper method for validating descriptions
"""
framework_info = self._gatherer.get_test_list(item)
self.validate_descriptions(framework_info)
def validate_tree(self):
"""
Validate the `perfdocs` directory that was found.
Returns True if it is good, False otherwise.
:return bool: True/False => Passed/Failed Validation
"""
found_good = 0
# For each framework, check their files and validate descriptions
for matched in self._gatherer.perfdocs_tree:
# Get the paths to the YAML and RST for this framework
matched_yml = os.path.join(matched["path"], matched["yml"])
matched_rst = os.path.join(matched["path"], matched["rst"])
_valid_files = {
"yml": self.validate_yaml(matched_yml),
"rst": self.validate_rst_content(matched_rst)
"rst": self.validate_rst_content(matched_rst),
}
# Log independently the errors found for the matched files


@ -9,18 +9,18 @@ import sys
def parse_file(f):
with open(f, "rb") as fh:
content = fh.read()
try:
return ast.parse(content)
except SyntaxError as e:
err = {
"path": f,
"message": e.msg,
"lineno": e.lineno,
"column": e.offset,
"source": e.text,
"rule": "is-parseable",
}
print(json.dumps(err))
@ -39,7 +39,7 @@ def check_compat_py2(f):
may_have_relative_imports = False
for node in ast.walk(root):
if isinstance(node, ast.ImportFrom):
if node.module == "__future__":
future_lineno = node.lineno
futures |= set(n.name for n in node.names)
else:
@ -50,19 +50,19 @@ def check_compat_py2(f):
haveprint = True
err = {
"path": f,
"lineno": future_lineno,
"column": 1,
}
if "absolute_import" not in futures and may_have_relative_imports:
err["rule"] = "require absolute_import"
err["message"] = "Missing from __future__ import absolute_import"
print(json.dumps(err))
if haveprint and "print_function" not in futures:
err["rule"] = "require print_function"
err["message"] = "Missing from __future__ import print_function"
print(json.dumps(err))
@ -71,14 +71,14 @@ def check_compat_py3(f):
parse_file(f)
if __name__ == "__main__":
if sys.version_info[0] == 2:
fn = check_compat_py2
else:
fn = check_compat_py3
manifest = sys.argv[1]
with open(manifest, "r") as fh:
files = fh.read().splitlines()
for f in files:
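A self-contained sketch of the `__future__` scan that check_compat_py2 performs, run on an inline snippet instead of a file:

import ast

tree = ast.parse("from __future__ import absolute_import\nx = 1\n")
futures = set()
for node in ast.walk(tree):
    if isinstance(node, ast.ImportFrom) and node.module == "__future__":
        futures |= set(n.name for n in node.names)
print("absolute_import" in futures)  # True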


@ -20,17 +20,19 @@ results = []
class PyCompatProcess(ProcessHandlerMixin):
def __init__(self, config, *args, **kwargs):
self.config = config
kwargs["processOutputLine"] = [self.process_line]
ProcessHandlerMixin.__init__(self, *args, **kwargs)
def process_line(self, line):
try:
res = json.loads(line)
except ValueError:
print(
"Non JSON output from {} linter: {}".format(self.config["name"], line)
)
return
res["level"] = "error"
results.append(result.from_config(self.config, **res))
@ -41,27 +43,27 @@ def setup(python):
binary = find_executable(python)
if not binary:
# TODO Bootstrap python2/python3 if not available
print("warning: {} not detected, skipping py-compat check".format(python))
def run_linter(python, paths, config, **lintargs):
log = lintargs["log"]
binary = find_executable(python)
if not binary:
# If we're in automation, this is fatal. Otherwise, the warning in the
# setup method was already printed.
if "MOZ_AUTOMATION" in os.environ:
return 1
return []
files = expand_exclusions(paths, config, lintargs["root"])
with mozfile.NamedTemporaryFile(mode="w") as fh:
fh.write("\n".join(files))
fh.flush()
cmd = [binary, os.path.join(here, "check_compat.py"), fh.name]
log.debug("Command: {}".format(" ".join(cmd)))
proc = PyCompatProcess(config, cmd)
proc.run()
@ -74,16 +76,16 @@ def run_linter(python, paths, config, **lintargs):
def setuppy2(**lintargs):
return setup("python2")
def lintpy2(*args, **kwargs):
return run_linter("python2", *args, **kwargs)
def setuppy3(**lintargs):
return setup("python3")
def lintpy3(*args, **kwargs):
return run_linter("python3", *args, **kwargs)


@ -15,38 +15,42 @@ from mozlint.pathutils import expand_exclusions
from mozlint.util import pip
here = os.path.abspath(os.path.dirname(__file__))
FLAKE8_REQUIREMENTS_PATH = os.path.join(here, "flake8_requirements.txt")
FLAKE8_NOT_FOUND = """
Could not find flake8! Install flake8 and try again.
$ pip install -U --require-hashes -r {}
""".strip().format(FLAKE8_REQUIREMENTS_PATH)
""".strip().format(
FLAKE8_REQUIREMENTS_PATH
)
FLAKE8_INSTALL_ERROR = """
Unable to install correct version of flake8
Try to install it manually with:
$ pip install -U --require-hashes -r {}
""".strip().format(FLAKE8_REQUIREMENTS_PATH)
""".strip().format(
FLAKE8_REQUIREMENTS_PATH
)
LINE_OFFSETS = {
# continuation line under-indented for hanging indent
"E121": (-1, 2),
# continuation line missing indentation or outdented
"E122": (-1, 2),
# continuation line over-indented for hanging indent
"E126": (-1, 2),
# continuation line over-indented for visual indent
"E127": (-1, 2),
# continuation line under-indented for visual indent
"E128": (-1, 2),
# continuation line unaligned for hanging indent
"E131": (-1, 2),
# expected 1 blank line, found 0
"E301": (-1, 2),
# expected 2 blank lines, found 1
"E302": (-2, 3),
}
"""Maps a flake8 error to a lineoffset tuple.
@ -56,10 +60,10 @@ to the lineoffset property of an `Issue`.
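Reading an entry as (line offset, number of lines), which is what the comments above suggest, an E302 reported at a hypothetical line 10 would cover lines 8 through 10 (LINE_OFFSETS is assumed to be in scope):

offset, num_lines = LINE_OFFSETS["E302"]  # (-2, 3)
lineno = 10  # hypothetical flake8-reported line
first = lineno + offset  # 8
last = first + num_lines - 1  # 10
print(first, last)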
# We use sys.prefix to find executables as that gets modified with
# virtualenv's activate_this.py, whereas sys.executable doesn't.
if platform.system() == "Windows":
bindir = os.path.join(sys.prefix, "Scripts")
else:
bindir = os.path.join(sys.prefix, "bin")
class NothingToLint(Exception):
@ -77,19 +81,21 @@ def setup(root, **lintargs):
def lint(paths, config, **lintargs):
from flake8.main.application import Application
log = lintargs["log"]
root = lintargs["root"]
config_path = os.path.join(root, ".flake8")
if lintargs.get("fix"):
fix_cmd = [
os.path.join(bindir, "autopep8"),
"--global-config",
config_path,
"--in-place",
"--recursive",
]
if config.get("exclude"):
fix_cmd.extend(["--exclude", ",".join(config["exclude"])])
subprocess.call(fix_cmd + paths)
@ -97,15 +103,19 @@ def lint(paths, config, **lintargs):
app = Application()
log.debug("flake8 version={}".format(app.version))
output_file = mozfile.NamedTemporaryFile(mode="r")
flake8_cmd = [
"--config",
config_path,
"--output-file",
output_file.name,
"--format",
'{"path":"%(path)s","lineno":%(row)s,'
'"column":%(col)s,"rule":"%(code)s","message":"%(text)s"}',
"--filename",
",".join(["*.{}".format(e) for e in config["extensions"]]),
]
log.debug("Command: {}".format(" ".join(flake8_cmd)))
orig_make_file_checker_manager = app.make_file_checker_manager
@ -119,16 +129,18 @@ def lint(paths, config, **lintargs):
tools/lint/mach_commands.py.
"""
# Ignore exclude rules if `--no-filter` was passed in.
config.setdefault("exclude", [])
if lintargs.get("use_filters", True):
config["exclude"].extend(self.options.exclude)
# Since we use the root .flake8 file to store exclusions, we haven't
# properly filtered the paths through mozlint's `filterpaths` function
# yet. This mimics that though there could be other edge cases that are
# different. Maybe we should call `filterpaths` directly, though for
# now that doesn't appear to be necessary.
filtered = [
p for p in paths if not any(p.startswith(e) for e in config["exclude"])
]
self.args = self.args + list(expand_exclusions(filtered, config, root))
@ -136,7 +148,9 @@ def lint(paths, config, **lintargs):
raise NothingToLint
return orig_make_file_checker_manager()
app.make_file_checker_manager = wrap_make_file_checker_manager.__get__(
app, Application
)
# Make sure to run from repository root so exclusions are joined to the
# repository root and not the current working directory.
@ -153,15 +167,15 @@ def lint(paths, config, **lintargs):
def process_line(line):
# Escape slashes otherwise JSON conversion will not work
line = line.replace("\\", "\\\\")
try:
res = json.loads(line)
except ValueError:
print("Non JSON output from linter, will not be processed: {}".format(line))
return
if res.get("code") in LINE_OFFSETS:
res["lineoffset"] = LINE_OFFSETS[res["code"]]
results.append(result.from_config(config, **res))


@ -10,9 +10,9 @@ pyflakes==2.2.0 \
pycodestyle==2.6.0 \
--hash=sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367 \
--hash=sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e
setuptools==49.1.0 \
--hash=sha256:60351853f8c093ef57224695ee989d5d074168f6b93dae000fa9996072adaba3 \
--hash=sha256:daf2e1c215f805b0ddc3b4262886bb6667ae0d4563887a8374fb766adc47c324
autopep8==1.5.3 \
--hash=sha256:60fd8c4341bab59963dafd5d2a566e94f547e660b9b396f772afe67d8481dbf0
entrypoints==0.3 \
@ -20,9 +20,9 @@ entrypoints==0.3 \
--hash=sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451
toml==0.10.1 \
--hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88
importlib-metadata==1.7.0 \
--hash=sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83 \
--hash=sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070
zipp==3.1.0 \
--hash=sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b \
--hash=sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96


@ -16,7 +16,7 @@ from compare_locales import parser
from compare_locales.paths import TOMLParser, ProjectFiles
LOCALE = "gecko-strings"
PULL_AFTER = timedelta(days=2)
@ -24,9 +24,9 @@ PULL_AFTER = timedelta(days=2)
def lint(paths, lintconfig, **lintargs):
l10n_base = mb_util.get_state_dir()
root = lintargs["root"]
exclude = lintconfig.get("exclude")
extensions = lintconfig.get("extensions")
# Load l10n.toml configs
l10nconfigs = load_configs(lintconfig, root, l10n_base)
@ -35,9 +35,9 @@ def lint(paths, lintconfig, **lintargs):
# Only the l10n.yml will show up here, but if the l10n.toml files
# change, we also get the l10n.yml as the toml files are listed as
# support files.
if lintconfig["path"] in paths:
results = validate_linter_includes(lintconfig, l10nconfigs, lintargs)
paths.remove(lintconfig["path"])
else:
results = []
@ -53,18 +53,21 @@ def lint(paths, lintconfig, **lintargs):
# explicitly excluded in the l10n.yml configuration.
# `browser/locales/en-US/firefox-l10n.js` is a good example.
all_files, _ = pathutils.filterpaths(
lintargs["root"],
all_files,
lintconfig["include"],
exclude=exclude,
extensions=extensions,
)
# These should be excluded in l10n.yml
skips = {p for p in all_files if not parser.hasParser(p)}
results.extend(
result.from_config(
lintconfig,
level="warning",
path=path,
message="file format not supported in compare-locales"
)
message="file format not supported in compare-locales",
)
for path in skips
)
all_files = [p for p in all_files if p not in skips]
@ -79,7 +82,7 @@ def lint(paths, lintconfig, **lintargs):
def gecko_strings_setup(**lint_args):
gs = mozpath.join(mb_util.get_state_dir(), LOCALE)
marker = mozpath.join(gs, ".hg", "l10n_pull_marker")
try:
last_pull = datetime.fromtimestamp(os.stat(marker).st_mtime)
skip_clone = datetime.now() < last_pull + PULL_AFTER
@ -87,27 +90,21 @@ def gecko_strings_setup(**lint_args):
skip_clone = False
if skip_clone:
return
hg = mozversioncontrol.get_tool_path("hg")
mozversioncontrol.repoupdate.update_mercurial_repo(
hg, "https://hg.mozilla.org/l10n/gecko-strings", gs
)
with open(marker, "w") as fh:
fh.flush()
def load_configs(lintconfig, root, l10n_base):
"""Load l10n configuration files specified in the linter configuration."""
configs = []
env = {"l10n_base": l10n_base}
for toml in lintconfig["l10n_configs"]:
cfg = TOMLParser().parse(
mozpath.join(root, toml), env=env, ignore_missing_includes=True
)
cfg.set_locales([LOCALE], deep=True)
configs.append(cfg)
@ -115,9 +112,9 @@ def load_configs(lintconfig, root, l10n_base):
def validate_linter_includes(lintconfig, l10nconfigs, lintargs):
"""Check l10n.yml config against l10n.toml configs."""
reference_paths = set(
mozpath.relpath(p["reference"].prefix, lintargs["root"])
for project in l10nconfigs
for config in project.configs
for p in config.paths
@ -125,24 +122,29 @@ def validate_linter_includes(lintconfig, l10nconfigs, lintargs):
# Just check for directories
reference_dirs = sorted(p for p in reference_paths if os.path.isdir(p))
missing_in_yml = [
refd for refd in reference_dirs if refd not in lintconfig["include"]
]
# These might be subdirectories in the config, though
missing_in_yml = [
d
for d in missing_in_yml
if not any(d.startswith(parent + "/") for parent in lintconfig["include"])
]
if missing_in_yml:
dirs = ", ".join(missing_in_yml)
return [
result.from_config(
lintconfig,
path=lintconfig["path"],
message="l10n.yml out of sync with l10n.toml, add: " + dirs,
)
]
return []
class MozL10nLinter(L10nLinter):
"""Subclass linter to generate the right result type."""
def __init__(self, lintconfig):
super(MozL10nLinter, self).__init__()
self.lintconfig = lintconfig


@ -15,20 +15,24 @@ from mozlint.pathutils import expand_exclusions
from mozlint.util import pip
here = os.path.abspath(os.path.dirname(__file__))
PYLINT_REQUIREMENTS_PATH = os.path.join(here, "pylint_requirements.txt")
PYLINT_NOT_FOUND = """
Could not find pylint! Install pylint and try again.
$ pip install -U --require-hashes -r {}
""".strip().format(PYLINT_REQUIREMENTS_PATH)
""".strip().format(
PYLINT_REQUIREMENTS_PATH
)
PYLINT_INSTALL_ERROR = """
Unable to install correct version of pylint
Try to install it manually with:
$ pip install -U --require-hashes -r {}
""".strip().format(PYLINT_REQUIREMENTS_PATH)
""".strip().format(
PYLINT_REQUIREMENTS_PATH
)
class PylintProcess(ProcessHandler):
@ -65,9 +69,6 @@ def run_process(config, cmd):
return proc.output
def parse_issues(log, config, issues_json, path):
results = []
@ -92,25 +93,27 @@ def parse_issues(log, config, issues_json, path):
def lint(paths, config, **lintargs):
log = lintargs["log"]
binary = get_pylint_binary()
log = lintargs["log"]
paths = list(expand_exclusions(paths, config, lintargs["root"]))
cmd_args = [binary]
results = []
# list from https://code.visualstudio.com/docs/python/linting#_pylint
# And ignore a bit more elements
cmd_args += ["-fjson",
"--disable=all",
"--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode", # NOQA: E501
"--disable=import-error,no-member"]
cmd_args += [
"-fjson",
"--disable=all",
"--enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,global-variable-not-assigned,unused-variable,binary-op-exception,bad-format-string,anomalous-backslash-in-string,bad-open-mode", # NOQA: E501
"--disable=import-error,no-member",
]
base_command = cmd_args + paths
log.debug("Command: {}".format(" ".join(cmd_args)))
output = " ".join(run_process(config, base_command))
results = parse_issues(log, config, str(output), [])


@ -19,7 +19,7 @@ from mozfile import which
# (4, 'severe')
abspath = os.path.abspath(os.path.dirname(__file__))
rstcheck_requirements_file = os.path.join(abspath, "requirements.txt")
results = []
@ -27,15 +27,19 @@ RSTCHECK_NOT_FOUND = """
Could not find rstcheck! Install rstcheck and try again.
$ pip install -U --require-hashes -r {}
""".strip().format(rstcheck_requirements_file)
""".strip().format(
rstcheck_requirements_file
)
RSTCHECK_INSTALL_ERROR = """
Unable to install required version of rstcheck
Try to install it manually with:
$ pip install -U --require-hashes -r {}
""".strip().format(rstcheck_requirements_file)
""".strip().format(
rstcheck_requirements_file
)
RSTCHECK_FORMAT_REGEX = re.compile(r"(.*):(.*): \(.*/([0-9]*)\) (.*)$")
def setup(root, **lintargs):
@ -49,11 +53,11 @@ def get_rstcheck_binary():
Returns the path of the first rstcheck binary available
if not found returns None
"""
binary = os.environ.get("RSTCHECK")
if binary:
return binary
return which("rstcheck")
def parse_with_split(errors):
@ -64,24 +68,21 @@ def parse_with_split(errors):
def lint(files, config, **lintargs):
log = lintargs["log"]
config["root"] = lintargs["root"]
paths = expand_exclusions(files, config, config["root"])
paths = list(paths)
chunk_size = 50
binary = get_rstcheck_binary()
rstcheck_options = "--ignore-language=cpp,json"
while paths:
cmdargs = [which("python"), binary, rstcheck_options,] + paths[:chunk_size]
log.debug("Command: {}".format(" ".join(cmdargs)))
proc = subprocess.Popen(
cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
universal_newlines=True,
@ -91,10 +92,10 @@ def lint(files, config, **lintargs):
if len(errors) > 1:
filename, lineno, level, message = parse_with_split(errors)
res = {
"path": filename,
"message": message,
"lineno": lineno,
"level": "error" if int(level) >= 2 else "warning",
}
results.append(result.from_config(config, **res))
paths = paths[chunk_size:]


@ -117,20 +117,18 @@ def get_rustfmt_version(binary):
"""
try:
output = subprocess.check_output(
[binary, "--version"],
stderr=subprocess.STDOUT,
universal_newlines=True,
[binary, "--version"], stderr=subprocess.STDOUT, universal_newlines=True,
)
except subprocess.CalledProcessError as e:
output = e.output
version = re.findall(r"\d.\d+.\d+", output)[0]
return StrictVersion(version)
def lint(paths, config, fix=None, **lintargs):
log = lintargs["log"]
paths = list(expand_exclusions(paths, config, lintargs["root"]))
# An empty path array can occur when the user passes in `-n`. If we don't
# return early in this case, rustfmt will attempt to read stdin and hang.
@ -145,7 +143,7 @@ def lint(paths, config, fix=None, **lintargs):
return 1
return []
min_version_str = config.get("min_rustfmt_version")
min_version = StrictVersion(min_version_str)
actual_version = get_rustfmt_version(binary)
log.debug(
@ -162,7 +160,7 @@ def lint(paths, config, fix=None, **lintargs):
if not fix:
cmd_args.append("--check")
base_command = cmd_args + paths
log.debug("Command: {}".format(" ".join(cmd_args)))
output = run_process(config, base_command)
if fix:
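A standalone sketch of the version parsing above, run against a fabricated rustfmt banner:

import re
from distutils.version import StrictVersion

output = "rustfmt 1.4.17-stable (64b7f1f 2020-04-27)"  # made-up banner
version = StrictVersion(re.findall(r"\d.\d+.\d+", output)[0])
print(version >= StrictVersion("1.4.12"))  # True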


@ -24,22 +24,21 @@ results = []
class ShellcheckProcess(LintProcess):
def process_line(self, line):
try:
data = json.loads(line)
except JSONDecodeError as e:
print("Unable to load shellcheck output ({}): {}".format(e, line))
return
for entry in data:
res = {
"path": entry["file"],
"message": entry["message"],
"level": "error",
"lineno": entry["line"],
"column": entry["column"],
"rule": entry["code"],
}
results.append(result.from_config(self.config, **res))
@ -54,22 +53,22 @@ def determine_shell_from_script(path):
#!/bin/bash
#!/usr/bin/env bash
"""
with open(path, "r") as f:
head = f.readline()
if not head.startswith("#!"):
return
# allow for parameters to the shell
shebang = head.split()[0]
# if the first entry is a variant of /usr/bin/env
if "env" in shebang:
shebang = head.split()[1]
if shebang.endswith("sh"):
# Strip first to avoid issues with #!bash
return shebang.strip("#!").split("/")[-1]
# make it clear we return None, rather than fall through.
return
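A standalone sketch of the shebang parsing above (the helper is restated so it runs on its own, and the one-line scripts are made up):

def shell_from_shebang(head):
    if not head.startswith("#!"):
        return None
    shebang = head.split()[0]
    if "env" in shebang:  # e.g. #!/usr/bin/env bash
        shebang = head.split()[1]
    if shebang.endswith("sh"):
        return shebang.strip("#!").split("/")[-1]
    return None

print(shell_from_shebang("#!/usr/bin/env bash"))  # bash
print(shell_from_shebang("#!/bin/sh"))  # sh
print(shell_from_shebang("#!/usr/bin/python"))  # None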
@ -77,19 +76,22 @@ def determine_shell_from_script(path):
def find_shell_scripts(config, paths):
found = dict()
root = config["root"]
exclude = [mozpath.join(root, e) for e in config.get("exclude", [])]
if config.get("extensions"):
pattern = "**/*.{}".format(config.get("extensions")[0])
else:
pattern = "**/*.sh"
files = []
for path in paths:
path = mozpath.normsep(path)
ignore = [
e[len(path) :].lstrip("/")
for e in exclude
if mozpath.commonprefix((path, e)) == path
]
finder = FileFinder(path, ignore=ignore)
files.extend([os.path.join(path, p) for p, f in finder.find(pattern)])
@ -114,34 +116,34 @@ def get_shellcheck_binary():
Returns the path of the first shellcheck binary available
if not found returns None
"""
binary = os.environ.get("SHELLCHECK")
if binary:
return binary
return which("shellcheck")
def lint(paths, config, **lintargs):
log = lintargs["log"]
binary = get_shellcheck_binary()
if not binary:
print(SHELLCHECK_NOT_FOUND)
if "MOZ_AUTOMATION" in os.environ:
return 1
return []
config["root"] = lintargs["root"]
files = find_shell_scripts(config, paths)
base_command = [binary, "-f", "json"]
if config.get("excludecodes"):
base_command.extend(["-e", ",".join(config.get("excludecodes"))])
for f in files:
cmd = list(base_command)
cmd.extend(["-s", files[f], f])
log.debug("Command: {}".format(cmd))
run_process(config, cmd)
return results


@ -18,46 +18,49 @@ from mozlint.util import pip
from mozlint.util.implementation import LintProcess
here = os.path.abspath(os.path.dirname(__file__))
CODESPELL_REQUIREMENTS_PATH = os.path.join(here, "codespell_requirements.txt")
CODESPELL_NOT_FOUND = """
Could not find codespell! Install codespell and try again.
$ pip install -U --require-hashes -r {}
""".strip().format(CODESPELL_REQUIREMENTS_PATH)
""".strip().format(
CODESPELL_REQUIREMENTS_PATH
)
CODESPELL_INSTALL_ERROR = """
Unable to install correct version of codespell
Try to install it manually with:
$ pip install -U --require-hashes -r {}
""".strip().format(CODESPELL_REQUIREMENTS_PATH)
""".strip().format(
CODESPELL_REQUIREMENTS_PATH
)
results = []
CODESPELL_FORMAT_REGEX = re.compile(r"(.*):(.*): (.*) ==> (.*)$")
class CodespellProcess(LintProcess):
def process_line(self, line):
try:
match = CODESPELL_FORMAT_REGEX.match(line)
abspath, line, typo, correct = match.groups()
except AttributeError:
print("Unable to match regex against output: {}".format(line))
return
# Ignore false positive like aParent (which would be fixed to apparent)
# See https://github.com/lucasdemarchi/codespell/issues/314
m = re.match(r"^[a-z][A-Z][a-z]*", typo)
if m:
return
res = {
"path": abspath,
"message": typo.strip() + " ==> " + correct,
"level": "error",
"lineno": line,
}
results.append(result.from_config(self.config, **res))
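The camelCase guard above can be checked in isolation: "aParent" matches the pattern and is skipped, while an ordinary typo is still reported (both inputs are fabricated):

import re

for typo in ("aParent", "apparant"):
    skipped = bool(re.match(r"^[a-z][A-Z][a-z]*", typo))
    print(typo, "skipped" if skipped else "reported")
# aParent skipped
# apparant reported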
@ -76,11 +79,11 @@ def get_codespell_binary():
Returns the path of the first codespell binary available
if not found returns None
"""
binary = os.environ.get("CODESPELL")
if binary:
return binary
return which("codespell")
def setup(root, **lintargs):
@ -90,36 +93,38 @@ def setup(root, **lintargs):
def lint(paths, config, fix=None, **lintargs):
log = lintargs["log"]
binary = get_codespell_binary()
if not binary:
print(CODESPELL_NOT_FOUND)
if "MOZ_AUTOMATION" in os.environ:
return 1
return []
config["root"] = lintargs["root"]
skip_files = ""
if "exclude" in config:
skip_files = "--skip=*.dic,{}".format(",".join(config["exclude"]))
exclude_list = os.path.join(here, "exclude-list.txt")
cmd_args = [
which("python"),
binary,
"--disable-colors",
# Silence some warnings:
# 1: disable warnings about wrong encoding
# 2: disable warnings about binary file
# 4: shut down warnings about automatic fixes
# that were disabled in dictionary.
"--quiet-level=7",
"--ignore-words=" + exclude_list,
skip_files,
]
if fix:
cmd_args.append("--write-changes")
log.debug("Command: {}".format(" ".join(cmd_args)))
base_command = cmd_args + paths


@ -16,8 +16,8 @@ results = []
def lint(files, config, **kwargs):
log = kwargs["log"]
tests_dir = os.path.join(kwargs["root"], "testing", "web-platform", "tests")
def process_line(line):
try:
@ -26,27 +26,34 @@ def lint(files, config, **kwargs):
return
data["level"] = "error"
data["path"] = os.path.relpath(os.path.join(tests_dir, data["path"]), kwargs['root'])
data["path"] = os.path.relpath(
os.path.join(tests_dir, data["path"]), kwargs["root"]
)
data.setdefault("lineno", 0)
results.append(result.from_config(config, **data))
if files == [tests_dir]:
print("No specific files specified, running the full wpt lint"
" (this is slow)", file=sys.stderr)
print(
"No specific files specified, running the full wpt lint" " (this is slow)",
file=sys.stderr,
)
files = ["--all"]
cmd = ["python2", os.path.join(tests_dir, "wpt"), "lint", "--json"] + files
log.debug("Command: {}".format(" ".join(cmd)))
proc = ProcessHandler(
cmd, env=os.environ, processOutputLine=process_line, universal_newlines=True
)
proc.run()
try:
proc.wait()
if proc.returncode != 0:
results.append(
result.from_config(
config,
message="Lint process exited with return code %s" % proc.returncode,
)
)
except KeyboardInterrupt:
proc.kill()


@ -14,28 +14,28 @@ from mozlint.pathutils import get_ancestors_by_name
from mozlint.util.implementation import LintProcess
YAMLLINT_FORMAT_REGEX = re.compile("(.*):(.*):(.*): \[(error|warning)\] (.*) \((.*)\)$")
results = []
class YAMLLintProcess(LintProcess):
def process_line(self, line):
try:
match = YAMLLINT_FORMAT_REGEX.match(line)
abspath, line, col, level, message, code = match.groups()
except AttributeError:
print("Unable to match yaml regex against output: {}".format(line))
return
res = {
"path": os.path.relpath(str(abspath), self.config["root"]),
"message": str(message),
"level": "error",
"lineno": line,
"column": col,
"rule": code,
}
results.append(result.from_config(self.config, **res))
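A sketch of the regex above against a fabricated line in yamllint's parsable output format:

import re

YAMLLINT_FORMAT_REGEX = re.compile(r"(.*):(.*):(.*): \[(error|warning)\] (.*) \((.*)\)$")
line = "browser/config.yml:3:1: [warning] missing document start (document-start)"
print(YAMLLINT_FORMAT_REGEX.match(line).groups())
# ('browser/config.yml', '3', '1', 'warning', 'missing document start', 'document-start')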
@ -45,19 +45,21 @@ def get_yamllint_binary(mc_root):
Returns the path of the first yamllint binary available
if not found returns None
"""
binary = os.environ.get("YAMLLINT")
if binary:
return binary
# yamllint is vendored in mozilla-central: let's use this
# if no environment variable is found.
return os.path.join(mc_root, "third_party", "python", "yamllint", "yamllint")
def get_yamllint_version(binary):
return subprocess.check_output(
[which("python"), binary, "--version"],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
def _run_pip(*args):
@ -65,8 +67,7 @@ def _run_pip(*args):
Helper function that runs pip with subprocess
"""
try:
subprocess.check_output(["pip"] + list(args), stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
print(e.output)
@ -86,37 +87,35 @@ def gen_yamllint_args(cmdargs, paths=None, conf_file=None):
args = cmdargs[:]
if isinstance(paths, str):
paths = [paths]
if conf_file and conf_file != "default":
return args + ["-c", conf_file] + paths
return args + paths
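Usage sketch (the helper is restated so the example runs on its own; the paths and config file are hypothetical):

def gen_yamllint_args(cmdargs, paths=None, conf_file=None):
    args = cmdargs[:]
    if isinstance(paths, str):
        paths = [paths]
    if conf_file and conf_file != "default":
        return args + ["-c", conf_file] + paths
    return args + paths

base = ["python", "yamllint", "-f", "parsable"]
print(gen_yamllint_args(base, paths=["a.yml"]))
# ['python', 'yamllint', '-f', 'parsable', 'a.yml']
print(gen_yamllint_args(base, paths=["b.yml"], conf_file="taskcluster/.yamllint"))
# ['python', 'yamllint', '-f', 'parsable', '-c', 'taskcluster/.yamllint', 'b.yml']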
def lint(files, config, **lintargs):
log = lintargs["log"]
binary = get_yamllint_binary(lintargs["root"])
log.debug("Version: {}".format(get_yamllint_version(binary)))
cmdargs = [which("python"), binary, "-f", "parsable"]
log.debug("Command: {}".format(" ".join(cmdargs)))
config = config.copy()
config["root"] = lintargs["root"]
# Run any paths with a .yamllint file in the directory separately so
# it gets picked up. This means only .yamllint files that live in
# directories that are explicitly included will be considered.
paths_by_config = defaultdict(list)
for f in files:
conf_files = get_ancestors_by_name(".yamllint", f, config["root"])
paths_by_config[conf_files[0] if conf_files else "default"].append(f)
for conf_file, paths in paths_by_config.items():
run_process(
config, gen_yamllint_args(cmdargs, conf_file=conf_file, paths=paths)
)
return results