New ContextGenerator stuff

Commit 58a1be5429

@@ -47,7 +47,7 @@ def test_results_aggregator():
     l10n._aggregate_results(err,
                             [{"type":"unchanged_entity",
                               "entities":0,
-                              "unchanged_entities":["asdf","ghjk"],
+                              "unchanged_entities":[("asdf", 1), ("ghjk", 1)],
                               "filename":"foo.bar"},
                              {"type":"total_entities",
                               "entities":100}],

@@ -58,7 +58,7 @@ def test_results_aggregator():
     l10n._aggregate_results(err,
                             [{"type":"unchanged_entity",
                               "entities":50,
-                              "unchanged_entities":["asdf","ghjk"],
+                              "unchanged_entities":[("asdf", 1), ("ghjk", 1)],
                               "filename":"foo.bar"},
                              {"type":"file_entity_count",
                               "filename":"foo.bar",

@@ -1,3 +1,4 @@
+from validator.contextgenerator import ContextGenerator

 class ChromeManifest(object):
     """This class enables convenient reading and searching of
@@ -7,6 +8,7 @@ class ChromeManifest(object):
         "Reads an ntriples style chrome.manifest file"

         self.data = data
+        self.context = ContextGenerator(data)
         self.lines = data.split("\n")

         # Extract the data from the triples in the manifest

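A minimal sketch of how the new context attribute on ChromeManifest might be used; it assumes the constructor takes the manifest text directly, as the hunk above suggests, and the manifest content here is invented.

    from validator.chromemanifest import ChromeManifest

    manifest_text = ("content  namespace  jar:chrome/files.jar!/content/\n"
                     "xpcnativewrappers  namespace  false\n"
                     "locale  namespace  en-US  jar:chrome/l10n.jar!/en-US/")
    manifest = ChromeManifest(manifest_text)

    # The ContextGenerator built from the same data can return the offending
    # line plus its neighbours for an error report (here, line index 1).
    print manifest.context.get_context(1)
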
@@ -0,0 +1,52 @@
+import os
+from StringIO import StringIO
+
+# The context generator creates a line-by-line mapping of all files that are
+# validated. It will then use that to help produce useful bits of code for
+# errors, warnings, and the like.
+
+class ContextGenerator:
+
+    def __init__(self, data=None):
+        if isinstance(data, StringIO):
+            data = data.getvalue()
+
+        self.data = data.split("\n")
+
+    def get_context(self, line):
+        "Returns a tuple containing the context for a line"
+
+        # If there is no data in the file, there can be no context.
+        datalen = len(self.data)
+        if datalen <= line:
+            return None
+
+        build = [self.data[line]]
+
+        # Add surrounding lines if they're available.
+        if line > 0:
+            build.insert(0, self.data[line - 1])
+        if line < datalen - 1:
+            build.append(self.data[line + 1])
+
+        for i in range(len(build)):
+            # This erases all leading whitespace. Perhaps we should keep it?
+            build[i] = build[i].strip()
+            # Truncate each line to 140-ish characters
+            if len(build[i]) >= 140:
+                build[i] = "%s ..." % build[i][:140]
+
+        # Return the final output as a tuple.
+        return tuple(build)
+
+    def get_line(self, position):
+        "Returns the line that the given string position would be found on"
+
+        count = len(self.data[0])
+        line = 1
+        while count < position:
+            count += len(self.data[line])
+            line += 1
+
+        return line
+

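To make the behaviour of the new class concrete, a small usage sketch against the code above; the sample text is invented.

    from validator.contextgenerator import ContextGenerator

    sample = "first line\nsecond line, the interesting one\nthird line"
    context = ContextGenerator(sample)

    # get_context() returns up to three stripped lines: the requested line
    # plus its neighbours, each truncated to roughly 140 characters.
    print context.get_context(1)
    # ('first line', 'second line, the interesting one', 'third line')

    # get_line() walks the per-line lengths to map a character offset in the
    # original string back to a line number.
    print context.get_line(sample.find("interesting"))   # 2
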
@@ -1,8 +1,8 @@
-import json
 import platform
 import uuid

+import json

+from contextgenerator import ContextGenerator
 if platform.system() != "Windows":
     from outputhandlers.shellcolors import OutputHandler
 else: # pragma: no cover
@@ -43,7 +43,8 @@ class ErrorBundle(object):
         self.handler = OutputHandler(pipe, no_color)


-    def error(self, err_id, error, description='', filename='', line=0):
+    def error(self, err_id, error,
+              description='', filename='', line=0, context=None):
         "Stores an error message for the validation process"
         self._save_message(self.errors,
                            "errors",
@@ -51,10 +52,12 @@ class ErrorBundle(object):
                             "message": error,
                             "description": description,
                             "file": filename,
-                            "line": line})
+                            "line": line},
+                           context=context)
         return self

-    def warning(self, err_id, warning, description='', filename='', line=0):
+    def warning(self, err_id, warning,
+                description='', filename='', line=0, context=None):
         "Stores a warning message for the validation process"
         self._save_message(self.warnings,
                            "warnings",
@@ -62,14 +65,17 @@ class ErrorBundle(object):
                             "message": warning,
                             "description": description,
                             "file": filename,
-                            "line": line})
+                            "line": line},
+                           context=context)
         return self

+    # NOTE : This function WILL NOT support contexts since it's deprecated.
     def info(self, err_id, info, description="", filename="", line=0):
         "An alias for notice"
         self.notice(err_id, info, description, filename, line)

-    def notice(self, err_id, notice, description="", filename="", line=0):
+    def notice(self, err_id, notice,
+               description="", filename="", line=0, context=None):
         "Stores an informational message about the validation"
         self._save_message(self.notices,
                            "notices",

@@ -77,15 +83,24 @@ class ErrorBundle(object):
                             "message": notice,
                             "description": description,
                             "file": filename,
-                            "line": line})
+                            "line": line},
+                           context=context)
         return self

-    def _save_message(self, stack, type_, message):
+    def _save_message(self, stack, type_, message, context=None):
         "Stores a message in the appropriate message stack."

         uid = uuid.uuid1().hex

         message["uid"] = uid

+        # Get the context for the message (if there's a context available)
+        if context is not None:
+            if isinstance(context, tuple):
+                message["context"] = context
+            else:
+                message["context"] = context.get_context(message["line"])
+
         stack.append(message)

         # Mark the tier that the error occurred at

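A self-contained sketch that mirrors the branch added to _save_message above: a tuple is stored verbatim, while anything else (normally a ContextGenerator) is asked for the context of the message's line. resolve_context is a stand-in helper for illustration only, not part of the validator.

    from validator.contextgenerator import ContextGenerator

    def resolve_context(message, context=None):
        # Same decision the new _save_message makes before appending the
        # message to its stack.
        if context is not None:
            if isinstance(context, tuple):
                message["context"] = context
            else:
                message["context"] = context.get_context(message["line"])
        return message

    data = "line one\nline two\nline three"
    print resolve_context({"line": 1}, ContextGenerator(data))
    print resolve_context({"line": 1}, ("line one", "line two", "line three"))
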
@@ -194,9 +209,10 @@ class ErrorBundle(object):
             # right carets.
             callback(message["id"],
                      message["message"],
-                     message["description"],
-                     trace,
-                     message["line"])
+                     description=message["description"],
+                     filename=trace,
+                     line=message["line"],
+                     context=message["context"])


     def _clean_description(self, message, json=False):
@@ -331,9 +347,9 @@ class ErrorBundle(object):
                     self._clean_message(message["description"]))

             # Show the user what tier we're on
-            verbose_output.append("\tTier: %d" % message["tier"])
+            verbose_output.append("\tTier:\t%d" % message["tier"])

-            # If file information is availe, output that as well.
+            # If file information is available, output that as well.
             files = message["file"]
             if files is not None and files != "":
                 fmsg = "\tFile:\t%s"
@@ -350,6 +366,12 @@ class ErrorBundle(object):
             if message["line"]:
                 verbose_output.append("\tLine:\t%s" % message["line"])

+            if "context" in message and message["context"]:
+                verbose_output.append("\tContext:")
+                verbose_output.extend(["\t>\t%s" % x
+                                       for x
+                                       in message["context"]])
+
             # Stick it in with the standard items.
             output.append("\n")
             output.append("\n".join(verbose_output))

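A stand-alone rendering of the context block added to the verbose output above, using an invented message; it shows the Context: header followed by one ">"-prefixed line per context entry.

    message = {"context": ("onload();", "eval(foo);", "</script>")}

    verbose_output = []
    if "context" in message and message["context"]:
        verbose_output.append("\tContext:")
        verbose_output.extend(["\t>\t%s" % x
                               for x
                               in message["context"]])
    print "\n".join(verbose_output)
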
@@ -96,6 +96,7 @@ def test_conduittoolbar(err, package_contents=None, xpi_manager=None):
                       "Detected Conduit toolbar.",
                       "'ebtoolbarstyle' found in chrome.manifest",
                       "chrome.manifest",
-                      data["line"])
+                      line=data["line"],
+                      context=chrome.context)

@@ -38,7 +38,8 @@ def test_xpcnativewrappers(err, package_contents=None, xpi_package=None):
                       """chrome.manifest files are not allowed to contain
                       xpcnativewrappers directives.""",
                       "chrome.manifest",
-                      triple["line"])
+                      line=triple["line"],
+                      context=chrome.context)


 @decorator.register_test(tier=2)
@@ -55,15 +56,15 @@ def test_packed_packages(err, package_contents=None, xpi_package=None):
             continue

         if name.split("/")[-1].startswith("._"):
-            err.info(("testcases_content",
-                      "test_packed_packages",
-                      "macintosh_junk"),
-                     "Garbage file found.",
-                     ["""A junk file has been detected. It may cause
-                      problems with proper operation of the add-on down the
-                      road.""",
-                      "It is recommended that you delete the file"],
-                     name)
+            err.notice(("testcases_content",
+                        "test_packed_packages",
+                        "macintosh_junk"),
+                       "Garbage file found.",
+                       ["""A junk file has been detected. It may cause
+                        problems with proper operation of the add-on down the
+                        road.""",
+                        "It is recommended that you delete the file"],
+                       name)

         processed = False

@@ -44,7 +44,7 @@ class MockBundler:
 class Traverser:
     "Traverses the AST Tree and determines problems with a chunk of JS."

-    def __init__(self, err, filename, start_line=0):
+    def __init__(self, err, filename, start_line=0, context=None):
         if err is not None:
             self.err = err
         else:
@@ -54,7 +54,8 @@ class Traverser:
         self.filename = filename
         self.start_line = start_line
         self.polluted = False
-        self.line = 0
+        self.line = 1
+        self.context = context

         # Can use the `this` object
         self.can_use_this = False
@@ -88,7 +89,8 @@ class Traverser:
                              """Some JavaScript code was found, but could not be
                              interpreted.""",
                              self.filename,
-                             self.start_line)
+                             self.start_line,
+                             self.context)
             return None

         self._debug("START>>")
@@ -298,7 +300,8 @@ class Traverser:
                              accessed by some JavaScript code.""",
                              "Accessed object: %s" % name],
                             self.filename,
-                            self.line)
+                            self.line,
+                            self.context)

         # Build out the wrapper object from the global definition.
         result = JSWrapper(is_global=True, traverser=self, lazy=True)
@@ -395,7 +398,8 @@ class JSWrapper(object):
                               "A variable declared as constant has been "
                               "overwritten in some JS code.",
                               traverser.filename,
-                              traverser.line)
+                              traverser.line,
+                              traverser.context)

         # We want to obey the permissions of global objects
         if self.is_global:
@@ -407,7 +411,8 @@ class JSWrapper(object):
                               "An attempt to overwrite a global varible "
                               "made in some JS code.",
                               traverser.filename,
-                              traverser.line)
+                              traverser.line,
+                              traverser.context)
         return self

@@ -1,4 +1,6 @@

+from validator.contextgenerator import ContextGenerator
+
 try:
     from HTMLParser import HTMLParser
 except ImportError: # pragma: no cover
@@ -16,6 +18,7 @@ class DTDParser(object):
         """

         self.entities = {}
+        self.items = []

         if isinstance(dtd, str):
             dtd_instance = open(dtd)
@@ -26,6 +29,9 @@ class DTDParser(object):

         self._parse(data)

+        # Create a context for the file
+        self.context = ContextGenerator(data)
+
     def __len__(self):
         return len(self.entities)

@@ -43,8 +49,9 @@ class DTDParser(object):
                 parser = DTDXMLParser()
             else:
                 if parser.out_buffer:
-                    for name, value in parser.out_buffer.items():
+                    for name, value, line in parser.out_buffer:
                         self.entities[name] = value
+                        self.items.append((name, value, line))
                     parser.clear_buffer()

@@ -53,7 +60,7 @@ class DTDXMLParser(HTMLParser):

     def __init__(self):
         HTMLParser.__init__(self)
-        self.out_buffer = {}
+        self.out_buffer = []

     def unknown_decl(self, decl):
         "Handles non-DOCTYPE SGML declarations in *ML documents."
@@ -67,10 +74,12 @@ class DTDXMLParser(HTMLParser):
             # TODO: Log an error?
             return

-        self.out_buffer[split_decl[1]] = split_decl[2].strip('\'"')
+        self.out_buffer.append((split_decl[1],
+                                split_decl[2].strip('\'"'),
+                                self.getpos()[0])) # Pos 0 is the line #

     def clear_buffer(self):
         "Clears the return buffer."
-        self.out_buffer = {}
+        self.out_buffer = []

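A sketch of how the (name, value, line) triples collected above might surface to callers. The import path and the StringIO input are assumptions (the diff does not show the module's location), so treat this as illustrative only.

    from StringIO import StringIO
    from validator.testcases.l10n.dtd import DTDParser   # assumed path

    dtd = DTDParser(StringIO('<!ENTITY foo.label "Foo">\n'
                             '<!ENTITY bar.label "Bar">'))

    # entities keeps the old name -> value mapping; items adds the line
    # number reported by getpos() when the declaration was parsed.
    print dtd.entities
    for name, value, line in dtd.items:
        print "%s = %s (line %d)" % (name, value, line)
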
@@ -1,3 +1,4 @@
+from validator.contextgenerator import ContextGenerator

 class PropertiesParser(object):
     """
@@ -13,6 +14,7 @@ class PropertiesParser(object):
         """

         self.entities = {}
+        self.items = []

         if isinstance(dtd, str):
             dtd_instance = open(dtd)
@@ -21,29 +23,49 @@ class PropertiesParser(object):
         else:
             data = dtd.getvalue()

+        # Create a context!
+        self.context = ContextGenerator(data)

         split_data = data.split("\n")
         line_buffer = None
+        line_number = 0
         for line in split_data:

+            # Increment the line number
+            line_number += 1

             # Clean things up
             clean_line = line.strip()
             if not clean_line:
                 continue
             if clean_line.startswith("#"):
                 continue

             # It's a line that wraps
             if clean_line.count("=") == 0:
                 if line_buffer:
                     line_buffer[-1] += clean_line
                 else:
                     continue
             else:

                 if line_buffer:
                     # This line terminates a wrapped line
                     self.entities[line_buffer[0].strip()] = \
                         line_buffer[1].strip()
+                    self.items.append((line_buffer[0].strip(),
+                                       line_buffer[1].strip(),
+                                       line_number))

                 line_buffer = clean_line.split("=", 1)

         # Handle any left-over wrapped line data
         if line_buffer:
             self.entities[line_buffer[0].strip()] = \
                 line_buffer[1].strip()
+            self.items.append((line_buffer[0].strip(),
+                               line_buffer[1].strip(),
+                               line_number))

     def __len__(self):
         return len(self.entities)

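A sketch of the wrapped-line handling above, with an invented .properties snippet; the import path is an assumption. Note that a continuation line (one without "=") is concatenated onto the previous value without any separator.

    from StringIO import StringIO
    from validator.testcases.l10n.properties import PropertiesParser  # assumed path

    props = PropertiesParser(StringIO("greeting=Hello\n"
                                      "farewell=Good\n"
                                      "    bye for now\n"
                                      "# comments and blank lines are skipped\n"))

    print props.entities["farewell"]   # "Goodbye for now"
    for name, value, line in props.items:
        print name, value, line
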
@@ -218,7 +218,7 @@ def _compare_packages(reference, target, ref_base=None):
         missing_entities = []
         unchanged_entities = []

-        for rname, rvalue in ref_doc.entities.items():
+        for rname, rvalue, rline in ref_doc.items:
             entity_count += 1

             if rname not in tar_doc.entities:
@@ -229,7 +229,7 @@ def _compare_packages(reference, target, ref_base=None):
                len(rvalue) > L10N_LENGTH_THRESHOLD and \
                not fnmatch.fnmatch(rvalue, "http*://*"):

-                unchanged_entities.append(rname)
+                unchanged_entities.append((rname, rline))
                 continue

@@ -362,7 +362,9 @@ def _aggregate_results(err, results, locale, similar=False):
                     (name,
                      unchanged["count"],
                      count,
-                     ", ".join(unchanged["entities"]),
+                     ", ".join(["%s (%d)" % (e, line)
+                                for e, line
+                                in unchanged["entities"]]),
                      percentage * 100))

         if agg_unchanged:

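For reference, the new join expression renders each unchanged entity with its line number; a stand-alone run with invented data:

    unchanged = {"entities": [("asdf", 1), ("ghjk", 14)]}
    print ", ".join(["%s (%d)" % (e, line)
                     for e, line
                     in unchanged["entities"]])
    # asdf (1), ghjk (14)
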
@@ -3,6 +3,7 @@ import re

 from validator import decorator
 from validator.chromemanifest import ChromeManifest
+from validator.contextgenerator import ContextGenerator
 from validator.constants import PACKAGE_LANGPACK

 BAD_LINK = '(href|src)=["\'](?!(chrome:\/\/|#([a-z][a-z0-9\-_:\.]*)?))'
@@ -41,7 +42,9 @@ def test_langpack_manifest(err, package_contents=None, xpi_package=None):
                        'locale' or 'override'. Other values are not
                        allowed.""",
                        "Invalid subject: %s" % subject],
-                      "chrome.manifest")
+                      "chrome.manifest",
+                      line=triple["line"],
+                      context=chrome.context)

         if subject == "override":
             object_ = triple["object"]
@@ -57,16 +60,21 @@ def test_langpack_manifest(err, package_contents=None, xpi_package=None):
                       "Invalid chrome.manifest object/predicate.",
                       "'override' entry does not match '/%s/'" % pattern,
                       "chrome.manifest",
-                      triple["line"])
+                      line=triple["line"],
+                      context=chrome.context)


 # This function is called by content.py
 def test_unsafe_html(err, filename, data):
     "Tests for unsafe HTML tags in language pack files."

+    context = ContextGenerator(data)
+
     unsafe_pttrn = re.compile('<(script|embed|object)')

-    if unsafe_pttrn.search(data):
+    match = unsafe_pttrn.search(data)
+    if match:
+        line = context.get_line(match.start())
         err.error(("testcases_langpack",
                    "test_unsafe_html",
                    "unsafe_content_html"),
@@ -74,15 +82,23 @@ def test_unsafe_html(err, filename, data):
                   """Language packs are not allowed to contain scripts,
                   embeds, or other executable code in the language
                   definition files.""",
-                  filename)
+                  filename,
+                  line=line,
+                  context=context)

     remote_pttrn = re.compile(BAD_LINK, re.I)
-    if remote_pttrn.search(data):
+
+    match = remote_pttrn.search(data)
+    if match:
+        line = context.get_line(match.start())
         err.error(("testcases_langpack",
                    "test_unsafe_html",
                    "unsafe_content_link"),
                   "Unsafe remote resource found in language pack.",
                   """Language packs are not allowed to contain
                   references to remote resources.""",
-                  filename)
+                  filename,
+                  line=line,
+                  context=context)

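A self-contained sketch of the new lookup flow in test_unsafe_html: the match offset from the regular expression is translated into a line number by the ContextGenerator before err.error() is called with line= and context=. The language-pack snippet is invented.

    import re
    from validator.contextgenerator import ContextGenerator

    data = ('<!ENTITY safe.label "Fine">\n'
            '<script>alert("nope")</script>\n'
            '<!ENTITY other.label "Also fine">')
    context = ContextGenerator(data)

    match = re.compile('<(script|embed|object)').search(data)
    if match:
        line = context.get_line(match.start())
        print line   # 2
        # err.error(..., filename, line=line, context=context) as in the hunk
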
@@ -1,17 +1,23 @@
 import re
 import json
 import fnmatch

 import cssutils

+from validator.contextgenerator import ContextGenerator
+
 def test_css_file(err, filename, data, line_start=1):
     "Parse and test a whole CSS file."

     tokenizer = cssutils.tokenize2.Tokenizer()
+    context = ContextGenerator(data)
     token_generator = tokenizer.tokenize(data)

     try:
-        _run_css_tests(err, token_generator, filename, line_start - 1)
+        _run_css_tests(err,
+                       tokens=token_generator,
+                       filename=filename,
+                       line_start=line_start - 1,
+                       context=context)
     except: #pragma: no cover
         # This happens because tokenize is a generator.
         # Bravo, Mr. Bond, Bravo.
@@ -28,12 +34,13 @@ def test_css_file(err, filename, data, line_start=1):
 def test_css_snippet(err, filename, data, line):
     "Parse and test a CSS nugget."

-    # Re-package to make it CSS-complete
-    data = "#foo{%s}" % data
+    # Re-package to make it CSS-complete. Note the whitespace to prevent
+    # the extra code from showing in the context output.
+    data = "#foo{\n\n%s\n\n}" % data

     test_css_file(err, filename, data, line)

-def _run_css_tests(err, tokens, filename, line_start=0):
+def _run_css_tests(err, tokens, filename, line_start=0, context=None):
     """Processes a CSS file to test it for things that could cause it
     to be harmful to the browser."""

@@ -89,7 +96,8 @@ def _run_css_tests(err, tokens, filename, line_start=0, context=None):
                               be chrome:// URLs. They should not be
                               used with relative file paths.""",
                              filename,
-                             line + line_start)
+                             line=line + line_start,
+                             context=context.get_context(line))
             else:
                 err.error(("testcases_markup_csstester",
                            "_run_css_tests",
@@ -101,7 +109,8 @@ def _run_css_tests(err, tokens, filename, line_start=0, context=None):
                              placed in the /content/ directory of the
                              package.""",
                             filename,
-                            line + line_start)
+                            line=line + line_start,
+                            context=context.get_context(line))

         elif tok_type == "HASH":
             # Search for interference with the identity box.
@@ -117,7 +126,7 @@ def _run_css_tests(err, tokens, filename, line_start=0, context=None):
                        sensitive piece of the interface and should
                        not be modified.""",
                       "Lines: %s" % ", ".join(identity_box_mods)],
-                      filename)
+                      filename)
         if webkit_insts:
             err.error(("testcases_markup_csstester",
                        "_run_css_tests",

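A quick illustration of why test_css_snippet now pads the wrapper with blank lines: the snippet lands on its own line, so a context lookup around it returns blanks rather than the synthetic "#foo{" wrapper. The snippet value is invented.

    from validator.contextgenerator import ContextGenerator

    snippet = "background: url('http://remote.example/x.png');"
    data = "#foo{\n\n%s\n\n}" % snippet

    # Line index 2 is the snippet itself; its neighbours are the padding
    # blank lines, so the "#foo{" and "}" wrapper stays out of the context.
    print ContextGenerator(data).get_context(2)
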
@@ -7,6 +7,7 @@ except ImportError: # pragma: no cover

 import validator.testcases.scripting as scripting
 from validator.testcases.markup import csstester
+from validator.contextgenerator import ContextGenerator
 from validator.constants import *

 DEBUG = False
@@ -43,6 +44,8 @@ class MarkupParser(HTMLParser):
         self.line = 0
         self.debug = debug

+        self.context = None
+
         self.xml_state = []
         self.xml_buffer = []

@@ -59,6 +62,8 @@ class MarkupParser(HTMLParser):

         self.reported = {}

+        self.context = ContextGenerator(data)
+
         lines = data.split("\n")
         line_buffer = []
         force_buffer = False
@@ -105,17 +110,18 @@ class MarkupParser(HTMLParser):
                     self.debug and "testscript" in self.xml_state):
                 if "script_comments" in self.reported:
                     return
-                self.err.info(("testcases_markup_markuptester",
-                               "_feed",
-                               "missing_script_comments"),
-                              "Missing comments in <script> tag",
-                              """Markup parsing errors occurred
-                              while trying to parse the file. This
-                              can likely be mitigated by wrapping
-                              <script> tag contents in HTML comment
-                              tags (<!-- -->)""",
-                              self.filename,
-                              self.line)
+                self.err.notice(("testcases_markup_markuptester",
+                                 "_feed",
+                                 "missing_script_comments"),
+                                "Missing comments in <script> tag",
+                                """Markup parsing errors occurred
+                                while trying to parse the file. This
+                                can likely be mitigated by wrapping
+                                <script> tag contents in HTML comment
+                                tags (<!-- -->)""",
+                                self.filename,
+                                line=self.line,
+                                context=self.context)
                 self.reported["script_comments"] = True
                 return

@@ -127,7 +133,8 @@ class MarkupParser(HTMLParser):
                               document.""",
                               str(inst)],
                              self.filename,
-                             self.line)
+                             line=self.line,
+                             context=self.context)
                 self.reported["markup"] = True


@@ -157,7 +164,8 @@ class MarkupParser(HTMLParser):
                            "Banned element",
                            "A banned element was detected",
                            self.filename,
-                           self.line)
+                           line=self.line,
+                           context=self.context)

         if self.err.detected_type == PACKAGE_LANGPACK:

@@ -173,7 +181,8 @@ class MarkupParser(HTMLParser):
                                performs.""",
                                'Tag "%s" is disallowed.' % tag],
                               self.filename,
-                              self.line)
+                              line=self.line,
+                              context=self.context)
                 if DEBUG: # pragma: no cover
                     print "Unsafe Tag ------"

@@ -189,7 +198,8 @@ class MarkupParser(HTMLParser):
                                src/href attributes must begin with
                                'chrome://'""",
                               self.filename,
-                              self.line)
+                              line=self.line,
+                              context=self.context)
                 self.err.reject = True

         if tag in ("iframe", "browser") and self.extension == "xul":
@@ -221,7 +231,8 @@ class MarkupParser(HTMLParser):
                                    either a valid `type` attribute or a `src`
                                    attribute that points to a local file.""",
                                   self.filename,
-                                  self.line)
+                                  line=self.line,
+                                  context=self.context)
                 elif (not type_ or
                       type_ not in SAFE_IFRAME_TYPES) and \
                      remote_src:
@@ -234,7 +245,8 @@ class MarkupParser(HTMLParser):
                                    attributes that reference local
                                    resources.""",
                                   self.filename,
-                                  self.line)
+                                  line=self.line,
+                                  context=self.context)

         elif tag == "script" and self.extension == "xul":
             # Per the Addon Validator Spec (v2), scripts in XUL
@@ -254,7 +266,8 @@ class MarkupParser(HTMLParser):
                                    referenced to script files that are
                                    hosted remotely.""",
                                   self.filename,
-                                  self.line)
+                                  line=self.line,
+                                  context=self.context)
                 self.err.reject = True

         # Find CSS and JS attributes and handle their values like they
@@ -295,7 +308,8 @@ class MarkupParser(HTMLParser):
                              """The markup file has more closing tags
                              than it has opening tags.""",
                              self.filename,
-                             self.line)
+                             line=self.line,
+                             context=self.context)
             self.reported["closing_tags"] = True
             if DEBUG: # pragma: no cover
                 print "Too many closing tags ------"
@@ -320,7 +334,8 @@ class MarkupParser(HTMLParser):
                               little overzealous with forward-slashes?""",
                               'Tag "%s" closed before it was opened' % tag],
                              self.filename,
-                             self.line)
+                             line=self.line,
+                             context=self.context)
             if DEBUG: # pragma: no cover
                 print "Tag closed before opened ------"
             return
@@ -345,7 +360,8 @@ class MarkupParser(HTMLParser):
                              invalidly nests its tags. This is not
                              permitted in the specified document type.""",
                              self.filename,
-                             self.line)
+                             line=self.line,
+                             context=self.context)
             if DEBUG: # pragma: no cover
                 print "Invalid markup nesting ------"

@@ -84,7 +84,6 @@ def test_xpcom(err, package_contents, xpi_package):
             break


-
 @decorator.register_test(tier=1, expected_type=3)
 def test_dictionary_layout(err, package_contents=None, xpi_package=None):
     """Ensures that dictionary packages contain the necessary

@@ -3,6 +3,7 @@ import os
 import subprocess

 import validator.testcases.javascript.traverser as traverser
+from validator.contextgenerator import ContextGenerator
 from validator.constants import SPIDERMONKEY_INSTALLATION as SPIDERMONKEY

 def test_js_file(err, name, data, filename=None, line=0):
@@ -19,6 +20,7 @@ def test_js_file(err, name, data, filename=None, line=0):
     if tree is None:
         return

+    context = ContextGenerator(data)
     t = traverser.Traverser(err, filename, line)
     t.run(tree)

@@ -35,4 +35,6 @@ def test_theme_manifest(err, package_contents=None, xpi_package=None):
                        reasons.""",
                        "Invalid subject: %s" % subject],
                       "chrome.manifest",
-                      triple["line"])
+                      line=triple["line"],
+                      context=chrome.context)
