Bug 1357001 - Part 5 - Fix linting errors. r=chutten

This commit is contained in:
Georg Fritzsche 2017-04-21 15:48:44 +02:00
Parent 79a9b8d93a
Commit 2d12447891
5 changed files with 52 additions and 53 deletions

View file

@ -115,7 +115,7 @@ def main(output, *filenames):
if len(filenames) > 1:
raise Exception('We don\'t support loading from more than one file.')
try:
events = parse_events.load_events(filenames[0])
events = parse_events.load_events(filenames[0])
except ParserError as ex:
print("\nError processing events:\n" + str(ex) + "\n")
sys.exit(1)

View file

@ -129,7 +129,7 @@ def write_histogram_static_asserts(output, histograms):
for histogram in histograms:
kind = histogram.kind()
if not kind in table:
if kind not in table:
raise Exception('Unknown kind "%s" for histogram "%s".' % (kind, histogram.name()))
fn = table[kind]
fn(output, histogram)

View file

@ -11,6 +11,7 @@ import re
import sys
from shared_telemetry_utils import ParserError
from collections import OrderedDict
# Constants.
MAX_LABEL_LENGTH = 20
@ -33,8 +34,6 @@ except ImportError:
# ensured it's in our sys.path.
pass
from collections import OrderedDict
def linear_buckets(dmin, dmax, n_buckets):
ret_array = [0] * n_buckets
@ -178,7 +177,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
'exponential': exponential_buckets,
}
if not self._kind in bucket_fns:
if self._kind not in bucket_fns:
raise ParserError('Unknown kind "%s" for histogram "%s".' % (self._kind, self._name))
fn = bucket_fns[self._kind]
@ -195,7 +194,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
'exponential': Histogram.exponential_bucket_parameters,
}
if not self._kind in bucket_fns:
if self._kind not in bucket_fns:
raise ParserError('Unknown kind "%s" for histogram "%s".' % (self._kind, self._name))
fn = bucket_fns[self._kind]
@ -220,7 +219,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
table['exponential'].append('extended_statistics_ok')
kind = definition['kind']
if not kind in table:
if kind not in table:
raise ParserError('Unknown kind "%s" for histogram "%s".' % (kind, name))
allowed_keys = table[kind]
@ -275,11 +274,11 @@ associated with the histogram. Returns None if no guarding is necessary."""
invalid = filter(lambda l: len(l) > MAX_LABEL_LENGTH, labels)
if len(invalid) > 0:
raise ParserError('Label values for "%s" exceed length limit of %d: %s' %
(name, MAX_LABEL_LENGTH, ', '.join(invalid)))
(name, MAX_LABEL_LENGTH, ', '.join(invalid)))
if len(labels) > MAX_LABEL_COUNT:
raise ParserError('Label count for "%s" exceeds limit of %d' %
(name, MAX_LABEL_COUNT))
(name, MAX_LABEL_COUNT))
# To make it easier to generate C++ identifiers from this etc., we restrict
# the label values to a strict pattern.
@ -287,7 +286,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
invalid = filter(lambda l: not re.match(pattern, l, re.IGNORECASE), labels)
if len(invalid) > 0:
raise ParserError('Label values for %s are not matching pattern "%s": %s' %
(name, pattern, ', '.join(invalid)))
(name, pattern, ', '.join(invalid)))
def check_whitelisted_kind(self, name, definition):
# We don't need to run any of these checks on the server.
@ -382,14 +381,14 @@ associated with the histogram. Returns None if no guarding is necessary."""
continue
if not isinstance(definition[key], key_type):
raise ParserError('Value for key "{0}" in histogram "{1}" should be {2}.'
.format(key, name, nice_type_name(key_type)))
.format(key, name, nice_type_name(key_type)))
for key, key_type in type_checked_list_fields.iteritems():
if key not in definition:
continue
if not all(isinstance(x, key_type) for x in definition[key]):
raise ParserError('All values for list "{0}" in histogram "{1}" should be of type {2}.'
.format(key, name, nice_type_name(key_type)))
.format(key, name, nice_type_name(key_type)))
def check_keys(self, name, definition, allowed_keys):
for key in definition.iterkeys():
@ -451,7 +450,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
'exponential': 'EXPONENTIAL',
}
if not self._kind in types:
if self._kind not in types:
raise ParserError('Unknown kind "%s" for histogram "%s".' % (self._kind, self._name))
self._nsITelemetry_kind = "nsITelemetry::HISTOGRAM_%s" % types[self._kind]

View file

@ -47,8 +47,8 @@ class AtomicTypeChecker:
def check(self, identifier, key, value):
if not isinstance(value, self.instance_type):
raise ParserError("%s: Failed type check for %s - expected %s, got %s." %
(identifier, key, nice_type_name(self.instance_type),
nice_type_name(type(value))))
(identifier, key, nice_type_name(self.instance_type),
nice_type_name(type(value))))
class MultiTypeChecker:
@ -61,9 +61,9 @@ class MultiTypeChecker:
def check(self, identifier, key, value):
if not any(isinstance(value, i) for i in self.instance_types):
raise ParserError("%s: Failed type check for %s - got %s, expected one of:\n%s" %
(identifier, key,
nice_type_name(type(value)),
" or ".join(map(nice_type_name, self.instance_types))))
(identifier, key,
nice_type_name(type(value)),
" or ".join(map(nice_type_name, self.instance_types))))
class ListTypeChecker:
@ -74,13 +74,13 @@ class ListTypeChecker:
def check(self, identifier, key, value):
if len(value) < 1:
raise ParserError("%s: Failed check for %s - list should not be empty." %
(identifier, key))
(identifier, key))
for x in value:
if not isinstance(x, self.instance_type):
raise ParserError("%s: Failed type check for %s - expected list value type %s, got"
" %s." % (identifier, key, nice_type_name(self.instance_type),
nice_type_name(type(x))))
" %s." % (identifier, key, nice_type_name(self.instance_type),
nice_type_name(type(x))))
class DictTypeChecker:
@ -92,21 +92,21 @@ class DictTypeChecker:
def check(self, identifier, key, value):
if len(value.keys()) < 1:
raise ParserError("%s: Failed check for %s - dict should not be empty." %
(identifier, key))
(identifier, key))
for x in value.iterkeys():
if not isinstance(x, self.keys_instance_type):
raise ParserError("%s: Failed dict type check for %s - expected key type %s, got "
"%s." %
(identifier, key,
nice_type_name(self.keys_instance_type),
nice_type_name(type(x))))
"%s." %
(identifier, key,
nice_type_name(self.keys_instance_type),
nice_type_name(type(x))))
for k, v in value.iteritems():
if not isinstance(v, self.values_instance_type):
raise ParserError("%s: Failed dict type check for %s - "
"expected value type %s for key %s, got %s." %
(identifier, key,
nice_type_name(self.values_instance_type),
k, nice_type_name(type(v))))
"expected value type %s for key %s, got %s." %
(identifier, key,
nice_type_name(self.values_instance_type),
k, nice_type_name(type(v))))
def type_check_event_fields(identifier, name, definition):
@ -147,14 +147,14 @@ def string_check(identifier, field, value, min_length=1, max_length=None, regex=
# Length check.
if len(value) < min_length:
raise ParserError("%s: Value '%s' for field %s is less than minimum length of %d." %
(identifier, value, field, min_length))
(identifier, value, field, min_length))
if max_length and len(value) > max_length:
raise ParserError("%s: Value '%s' for field %s is greater than maximum length of %d." %
(identifier, value, field, max_length))
(identifier, value, field, max_length))
# Regex check.
if regex and not re.match(regex, value):
raise ParserError('%s: String value "%s" for %s is not matching pattern "%s".' %
(identifier, value, field, regex))
(identifier, value, field, regex))
class EventData:
@ -183,7 +183,7 @@ class EventData:
allowed_rcc = ["opt-in", "opt-out"]
if rcc not in allowed_rcc:
raise ParserError("%s: Value for %s should be one of: %s" %
(self.identifier, rcc_key, ", ".join(allowed_rcc)))
(self.identifier, rcc_key, ", ".join(allowed_rcc)))
# Check record_in_processes.
record_in_processes = definition.get('record_in_processes')
@ -195,7 +195,7 @@ class EventData:
extra_keys = definition.get('extra_keys', {})
if len(extra_keys.keys()) > MAX_EXTRA_KEYS_COUNT:
raise ParserError("%s: Number of extra_keys exceeds limit %d." %
(self.identifier, MAX_EXTRA_KEYS_COUNT))
(self.identifier, MAX_EXTRA_KEYS_COUNT))
for key in extra_keys.iterkeys():
string_check(self.identifier, field='extra_keys', value=key,
min_length=1, max_length=MAX_EXTRA_KEY_NAME_LENGTH,
@ -204,12 +204,12 @@ class EventData:
# Check expiry.
if 'expiry_version' not in definition and 'expiry_date' not in definition:
raise ParserError("%s: event is missing an expiration - either expiry_version or expiry_date is required" %
(self.identifier))
(self.identifier))
expiry_date = definition.get('expiry_date')
if expiry_date and isinstance(expiry_date, basestring) and expiry_date != 'never':
if not re.match(DATE_PATTERN, expiry_date):
raise ParserError("%s: Event has invalid expiry_date, it should be either 'never' or match this format: %s" %
(self.identifier, DATE_PATTERN))
(self.identifier, DATE_PATTERN))
# Parse into date.
definition['expiry_date'] = datetime.datetime.strptime(expiry_date, '%Y-%m-%d')

View file

@ -55,8 +55,8 @@ class ScalarType:
for n in [group_name, probe_name]:
if len(n) > MAX_NAME_LENGTH:
raise ParserError(("Name '{}' exceeds maximum name length of {} characters.\n"
"See: {}#the-yaml-definition-file")
.format(n, MAX_NAME_LENGTH, BASE_DOC_URL))
"See: {}#the-yaml-definition-file")
.format(n, MAX_NAME_LENGTH, BASE_DOC_URL))
def check_name(name, error_msg_prefix, allowed_char_regexp):
# Check if we only have the allowed characters.
@ -68,8 +68,8 @@ class ScalarType:
# Don't allow leading/trailing digits, '.' or '_'.
if re.search(r'(^[\d\._])|([\d\._])$', name):
raise ParserError((error_msg_prefix + " name must not have a leading/trailing "
"digit, a dot or underscore. Got: '{}'.\n"
" See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
"digit, a dot or underscore. Got: '{}'.\n"
" See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
check_name(group_name, 'Group', r'\.')
check_name(probe_name, 'Probe', r'_')
@ -115,20 +115,20 @@ class ScalarType:
missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
if len(missing_fields) > 0:
raise ParserError(self._name + ' - missing required fields: ' + ', '.join(missing_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Do we have any unknown field?
unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
if len(unknown_fields) > 0:
raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Checks the type for all the fields.
wrong_type_names = ['{} must be {}'.format(f, ALL_FIELDS[f].__name__)
for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])]
if len(wrong_type_names) > 0:
raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Check that the lists are not empty and that data in the lists
# have the correct types.
@ -137,16 +137,16 @@ class ScalarType:
# Check for empty lists.
if len(definition[field]) == 0:
raise ParserError(("Field '{}' for probe '{}' must not be empty" +
".\nSee: {}#required-fields)")
.format(field, self._name, BASE_DOC_URL))
".\nSee: {}#required-fields)")
.format(field, self._name, BASE_DOC_URL))
# Check the type of the list content.
broken_types =\
[not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
if any(broken_types):
raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
".\nSee: {}#the-yaml-definition-file)")
.format(field, self._name, LIST_FIELDS_CONTENT[field].__name__,
BASE_DOC_URL))
".\nSee: {}#the-yaml-definition-file)")
.format(field, self._name, LIST_FIELDS_CONTENT[field].__name__,
BASE_DOC_URL))
def validate_values(self, definition):
"""This function checks that the fields have the correct values.
@ -159,26 +159,26 @@ class ScalarType:
scalar_kind = definition.get('kind')
if scalar_kind not in SCALAR_TYPES_MAP.keys():
raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind +
'.\nSee: {}'.format(BASE_DOC_URL))
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the collection policy.
collection_policy = definition.get('release_channel_collection', None)
if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate the cpp_guard.
cpp_guard = definition.get('cpp_guard')
if cpp_guard and re.match(r'\W', cpp_guard):
raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate record_in_processes.
record_in_processes = definition.get('record_in_processes', [])
for proc in record_in_processes:
if not utils.is_valid_process_name(proc):
raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc +
'.\nSee: {}'.format(BASE_DOC_URL))
'.\nSee: {}'.format(BASE_DOC_URL))
@property
def name(self):