bug 1382005, update compare-locales to 2.1, r=glandium

Vendor new dependency for compare-locales, python-fluent.
This is the 0.4.2 release of python-fluent.

Also, make the mach command a bare-minimum wrapper.

The compare-locales mach command used to own a couple of defaults
and opinions.
Now that those opinions and defaults are in the Makefiles, this
command can be much simpler. As a side effect, this should
make the Thunderbird port easier, where none of the mach defaults
worked.

Update l10n.mk for compare-locales 2.x:

The command line interface is different now, with positional arguments.
Also, the l10n config file isn't a default anymore (that never worked
for Thunderbird).
And the merge dir is now the parent of the locale dir, much like
we already anticipated in the code.
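
For illustration, a 2.x invocation as wired up by the new argument
parser might look like this (paths and locales are hypothetical; the
{ab_CD} placeholder comes straight from the --merge help text):

    compare-locales browser/locales/l10n.toml ../l10n-central de fr \
        --merge obj-dir/merge/{ab_CD}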

MozReview-Commit-ID: DxVVKyVSt5y

--HG--
extra : rebase_source : 7c591fea8c7d9ecef615fb56f9359d4f1bd4b340
Axel Hecht 2017-09-18 18:31:24 +02:00
Parent 00433d9cf4
Commit 3dc36a71a4
46 changed files: 6712 additions and 1142 deletions

View file

@ -8,6 +8,7 @@ mozilla.pth:third_party/python/compare-locales
mozilla.pth:third_party/python/configobj
mozilla.pth:third_party/python/cram
mozilla.pth:third_party/python/dlmanager
mozilla.pth:third_party/python/fluent
mozilla.pth:third_party/python/futures
mozilla.pth:third_party/python/hglib
mozilla.pth:third_party/python/jsmin

View file

@ -1 +1 @@
version = "1.2.3"
version = "2.1"

View file

@ -3,6 +3,7 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
from collections import Counter
from difflib import SequenceMatcher
from xml import sax
try:
@ -10,18 +11,24 @@ try:
except ImportError:
from StringIO import StringIO
from compare_locales.parser import DTDParser, PropertiesParser
from compare_locales.parser import DTDParser, PropertiesEntity
class Checker(object):
'''Abstract class to implement checks per file type.
'''
pattern = None
# if a check uses all reference entities, set this to True
needs_reference = False
@classmethod
def use(cls, file):
return cls.pattern.match(file.file)
def __init__(self, extra_tests):
self.extra_tests = extra_tests
self.reference = None
def check(self, refEnt, l10nEnt):
'''Given the reference and localized Entities, performs checks.
@ -34,6 +41,12 @@ class Checker(object):
raise NotImplementedError("Need to subclass")
yield ("error", (0, 0), "This is an example error", "example")
def set_reference(self, reference):
'''Set the reference entities.
Only do this if self.needs_reference is True.
'''
self.reference = reference
class PrintfException(Exception):
def __init__(self, msg, pos):
@ -58,7 +71,8 @@ class PropertiesChecker(Checker):
refSpecs = None
# check for PluralForm.jsm stuff, should have the docs in the
# comment
if 'Localization_and_Plurals' in refEnt.pre_comment:
if (refEnt.pre_comment
and 'Localization_and_Plurals' in refEnt.pre_comment.all):
# For plurals, common variable pattern is #1. Try that.
pats = set(int(m.group(1)) for m in re.finditer('#([0-9]+)',
refValue))
@ -77,9 +91,9 @@ class PropertiesChecker(Checker):
return
# check for lost escapes
raw_val = l10nEnt.raw_val
for m in PropertiesParser.escape.finditer(raw_val):
for m in PropertiesEntity.escape.finditer(raw_val):
if m.group('single') and \
m.group('single') not in PropertiesParser.known_escapes:
m.group('single') not in PropertiesEntity.known_escapes:
yield ('warning', m.start(),
'unknown escape sequence, \\' + m.group('single'),
'escape')
@ -159,10 +173,6 @@ class PropertiesChecker(Checker):
specs[ls:pos] = nones*[None]
specs.append(m.group('spec'))
else:
if specs[pos] is not None:
raise PrintfException('Double ordered argument %d' %
(pos+1),
m.start())
specs[pos] = m.group('spec')
else:
specs.append(m.group('spec'))
@ -184,6 +194,7 @@ class DTDChecker(Checker):
Also checks for some CSS and number heuristics in the values.
"""
pattern = re.compile('.*\.dtd$')
needs_reference = True # to cast a wider net for known entity references
eref = re.compile('&(%s);' % DTDParser.Name)
tmpl = '''<!DOCTYPE elem [%s]>
@ -191,8 +202,11 @@ class DTDChecker(Checker):
'''
xmllist = set(('amp', 'lt', 'gt', 'apos', 'quot'))
def __init__(self, reference):
self.reference = reference
def __init__(self, extra_tests):
super(DTDChecker, self).__init__(extra_tests)
self.processContent = False
if self.extra_tests is not None and 'android-dtd' in self.extra_tests:
self.processContent = True
self.__known_entities = None
def known_entities(self, refValue):
@ -228,8 +242,6 @@ class DTDChecker(Checker):
style = re.compile(r'^%(spec)s\s*(;\s*%(spec)s\s*)*;?$' %
{'spec': spec.pattern})
processContent = None
def check(self, refEnt, l10nEnt):
"""Try to parse the refvalue inside a dummy element, and keep
track of entities that we need to define to make that work.
@ -263,7 +275,7 @@ class DTDChecker(Checker):
l10nlist = self.entities_for_value(l10nValue)
missing = sorted(l10nlist - reflist)
_entities = entities + ''.join('<!ENTITY %s "">' % s for s in missing)
if self.processContent is not None:
if self.processContent:
self.texthandler.textcontent = ''
parser.setContentHandler(self.texthandler)
try:
@ -347,17 +359,10 @@ class DTDChecker(Checker):
if msgs:
yield ('warning', 0, ', '.join(msgs), 'css')
if self.processContent is not None:
for t in self.processContent(self.texthandler.textcontent):
if self.extra_tests is not None and 'android-dtd' in self.extra_tests:
for t in self.processAndroidContent(self.texthandler.textcontent):
yield t
class PrincessAndroid(DTDChecker):
"""Checker for the string values that Android puts into an XML container.
http://developer.android.com/guide/topics/resources/string-resource.html#FormattingAndStyling # noqa
has more info. Check for unescaped apostrophes and bad unicode escapes.
"""
quoted = re.compile("(?P<q>[\"']).*(?P=q)$")
def unicode_escape(self, str):
@ -385,15 +390,11 @@ class PrincessAndroid(DTDChecker):
args[3] = i + len(badstring)
raise UnicodeDecodeError(*args)
@classmethod
def use(cls, file):
"""Use this Checker only for DTD files in embedding/android."""
return (file.module in ("embedding/android",
"mobile/android/base") and
cls.pattern.match(file.file))
def processAndroidContent(self, val):
"""Check for the string values that Android puts into an XML container.
http://developer.android.com/guide/topics/resources/string-resource.html#FormattingAndStyling # noqa
def processContent(self, val):
"""Actual check code.
Check for unicode escapes and unescaped quotes and apostrophes,
if string's not quoted.
"""
@ -428,11 +429,68 @@ class PrincessAndroid(DTDChecker):
yield ('error', m.end(0)+offset, msg, 'android')
def getChecker(file, reference=None):
class FluentChecker(Checker):
'''Tests to run on Fluent (FTL) files.
'''
pattern = re.compile('.*\.ftl')
# Positions yielded by FluentChecker.check are absolute offsets from the
# beginning of the file. This is different from the base Checker behavior
# which yields offsets from the beginning of the current entity's value.
def check(self, refEnt, l10nEnt):
ref_entry = refEnt.entry
l10n_entry = l10nEnt.entry
# verify that values match, either both have a value or none
if ref_entry.value is not None and l10n_entry.value is None:
yield ('error', l10n_entry.span.start,
'Missing value', 'fluent')
if ref_entry.value is None and l10n_entry.value is not None:
yield ('error', l10n_entry.value.span.start,
'Obsolete value', 'fluent')
# verify that we're having the same set of attributes
ref_attr_names = set((attr.id.name for attr in ref_entry.attributes))
ref_pos = dict((attr.id.name, i)
for i, attr in enumerate(ref_entry.attributes))
l10n_attr_counts = \
Counter(attr.id.name for attr in l10n_entry.attributes)
l10n_attr_names = set(l10n_attr_counts)
l10n_pos = dict((attr.id.name, i)
for i, attr in enumerate(l10n_entry.attributes))
# check for duplicate Attributes
# only warn to not trigger a merge skip
for attr_name, cnt in l10n_attr_counts.items():
if cnt > 1:
yield (
'warning',
l10n_entry.attributes[l10n_pos[attr_name]].span.start,
'Attribute "{}" occurs {} times'.format(
attr_name, cnt),
'fluent')
missing_attr_names = sorted(ref_attr_names - l10n_attr_names,
key=lambda k: ref_pos[k])
for attr_name in missing_attr_names:
yield ('error', l10n_entry.span.start,
'Missing attribute: ' + attr_name, 'fluent')
obsolete_attr_names = sorted(l10n_attr_names - ref_attr_names,
key=lambda k: l10n_pos[k])
obsolete_attrs = [
attr
for attr in l10n_entry.attributes
if attr.id.name in obsolete_attr_names
]
for attr in obsolete_attrs:
yield ('error', attr.span.start,
'Obsolete attribute: ' + attr.id.name, 'fluent')
def getChecker(file, extra_tests=None):
if PropertiesChecker.use(file):
return PropertiesChecker()
if PrincessAndroid.use(file):
return PrincessAndroid(reference)
return PropertiesChecker(extra_tests)
if DTDChecker.use(file):
return DTDChecker(reference)
return DTDChecker(extra_tests)
if FluentChecker.use(file):
return FluentChecker(extra_tests)
return None
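
A minimal sketch of what FluentChecker reports, assuming
compare-locales 2.1 and python-fluent 0.4.2 are importable; file name
and strings here are invented:

    from compare_locales.parser import getParser
    from compare_locales.checks import FluentChecker

    p = getParser('demo.ftl')
    p.readContents('key = Value\n    .tooltip = Tip\n')
    ref = list(p)[0]
    p.readContents('key =\n    .label = Eh\n')
    l10n = list(p)[0]
    for tp, pos, msg, cat in FluentChecker(None).check(ref, l10n):
        print tp, msg
    # error Missing value
    # error Missing attribute: tooltip
    # error Obsolete attribute: label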

View file

@ -6,18 +6,20 @@
import logging
from argparse import ArgumentParser
import os
from compare_locales import version
from compare_locales.paths import EnumerateApp
from compare_locales.compare import compareApp, compareDirs
from compare_locales.webapps import compare_web_app
from compare_locales.paths import EnumerateApp, TOMLParser, ConfigNotFound
from compare_locales.compare import compareProjects, Observer
class BaseCommand(object):
"""Base class for compare-locales commands.
This handles command line parsing, and general sugar for setuptools
entry_points.
"""
class CompareLocales(object):
"""Check the localization status of gecko applications.
The first arguments are paths to the l10n.ini or toml files for the
applications, followed by the base directory of the localization repositories.
Then you pass in the list of locale codes you want to compare. If no
locales are given, the list of locales will be taken from the l10n.toml file
or the all-locales file referenced by the application\'s l10n.ini."""
def __init__(self):
self.parser = None
@ -35,9 +37,27 @@ class BaseCommand(object):
parser.add_argument('-m', '--merge',
help='''Use this directory to stage merged files,
use {ab_CD} to specify a different directory for each locale''')
return parser
def add_data_argument(self, parser):
parser.add_argument('config_paths', metavar='l10n.toml', nargs='+',
help='TOML or INI file for the project')
parser.add_argument('l10n_base_dir', metavar='l10n-base-dir',
help='Parent directory of localizations')
parser.add_argument('locales', nargs='*', metavar='locale-code',
help='Locale code and top-level directory of '
'each localization')
parser.add_argument('-D', action='append', metavar='var=value',
default=[], dest='defines',
help='Overwrite variables in TOML files')
parser.add_argument('--unified', action="store_true",
help="Show output for all projects unified")
parser.add_argument('--full', action="store_true",
help="Compare projects that are disabled")
parser.add_argument('--clobber-merge', action="store_true",
default=False, dest='clobber',
help="""WARNING: DATALOSS.
Use this option with care. If specified, the merge directory will
be clobbered for each module. That means, the subdirectory will
be completely removed, any files that were there are lost.
Be careful to specify the right merge directory when using this option.""")
parser.add_argument('--data', choices=['text', 'exhibit', 'json'],
default='text',
help='''Choose data and format (one of text,
@ -46,6 +66,7 @@ with warnings and errors. Also prints a summary; json: Serialize the internal
tree, useful for tools. Also always succeeds; exhibit: Serialize the summary
data in a json useful for Exhibit
''')
return parser
@classmethod
def call(cls):
@ -54,7 +75,7 @@ data in a json useful for Exhibit
subclasses.
"""
cmd = cls()
cmd.handle_()
return cmd.handle_()
def handle_(self):
"""The instance part of the classmethod call."""
@ -64,92 +85,82 @@ data in a json useful for Exhibit
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING -
(args.v - args.q) * 10)
observer = self.handle(args)
print observer.serialize(type=args.data).encode('utf-8', 'replace')
kwargs = vars(args)
# strip handled arguments
kwargs.pop('q')
kwargs.pop('v')
return self.handle(**kwargs)
def handle(self, args):
"""Subclasses need to implement this method for the actual
command handling.
"""
raise NotImplementedError
class CompareLocales(BaseCommand):
"""Check the localization status of a gecko application.
The first argument is a path to the l10n.ini file for the application,
followed by the base directory of the localization repositories.
Then you pass in the list of locale codes you want to compare. If there are
not locales given, the list of locales will be taken from the all-locales file
of the application\'s l10n.ini."""
def get_parser(self):
parser = super(CompareLocales, self).get_parser()
parser.add_argument('ini_file', metavar='l10n.ini',
help='INI file for the project')
parser.add_argument('l10n_base_dir', metavar='l10n-base-dir',
help='Parent directory of localizations')
parser.add_argument('locales', nargs='*', metavar='locale-code',
help='Locale code and top-level directory of '
'each localization')
parser.add_argument('--clobber-merge', action="store_true",
default=False, dest='clobber',
help="""WARNING: DATALOSS.
Use this option with care. If specified, the merge directory will
be clobbered for each module. That means, the subdirectory will
be completely removed, any files that were there are lost.
Be careful to specify the right merge directory when using this option.""")
parser.add_argument('-r', '--reference', default='en-US',
dest='reference',
help='Explicitly set the reference '
'localization. [default: en-US]')
self.add_data_argument(parser)
return parser
def handle(self, args):
app = EnumerateApp(args.ini_file, args.l10n_base_dir, args.locales)
app.reference = args.reference
def handle(self, config_paths, l10n_base_dir, locales,
merge=None, defines=None, unified=False, full=False,
clobber=False, data='text'):
# using nargs multiple times in argparser totally screws things
# up, repair that.
# First files are configs, then the base dir, everything else is
# locales
all_args = config_paths + [l10n_base_dir] + locales
config_paths = []
locales = []
if defines is None:
defines = []
while all_args and not os.path.isdir(all_args[0]):
config_paths.append(all_args.pop(0))
if not config_paths:
self.parser.error('no configuration file given')
for cf in config_paths:
if not os.path.isfile(cf):
self.parser.error('config file %s not found' % cf)
if not all_args:
self.parser.error('l10n-base-dir not found')
l10n_base_dir = all_args.pop(0)
locales.extend(all_args)
# when we compare disabled projects, we set our locales
# on all subconfigs, so deep is True.
locales_deep = full
configs = []
config_env = {}
for define in defines:
var, _, value = define.partition('=')
config_env[var] = value
for config_path in config_paths:
if config_path.endswith('.toml'):
try:
config = TOMLParser.parse(config_path, env=config_env)
except ConfigNotFound as e:
self.parser.exit('config file %s not found' % e.filename)
config.add_global_environment(l10n_base=l10n_base_dir)
if locales:
config.set_locales(locales, deep=locales_deep)
configs.append(config)
else:
app = EnumerateApp(
config_path, l10n_base_dir, locales)
configs.append(app.asConfig())
try:
observer = compareApp(app, merge_stage=args.merge,
clobber=args.clobber)
unified_observer = None
if unified:
unified_observer = Observer()
observers = compareProjects(
configs,
stat_observer=unified_observer,
merge_stage=merge, clobber_merge=clobber)
except (OSError, IOError), exc:
print "FAIL: " + str(exc)
self.parser.exit(2)
return observer
if unified:
observers = [unified_observer]
class CompareDirs(BaseCommand):
"""Check the localization status of a directory tree.
The first argument is a path to the reference data,the second is the
localization to be tested."""
def get_parser(self):
parser = super(CompareDirs, self).get_parser()
parser.add_argument('reference')
parser.add_argument('localization')
self.add_data_argument(parser)
return parser
def handle(self, args):
observer = compareDirs(args.reference, args.localization,
merge_stage=args.merge)
return observer
class CompareWebApp(BaseCommand):
"""Check the localization status of a gaia-style web app.
The first argument is the directory of the web app.
Following arguments explicitly state the locales to test.
If none are given, test all locales in manifest.webapp or files."""
def get_parser(self):
parser = super(CompareWebApp, self).get_parser()
parser.add_argument('webapp')
parser.add_argument('locales', nargs='*', metavar='locale-code',
help='Locale code and top-level directory of '
'each localization')
self.add_data_argument(parser)
return parser
def handle(self, args):
observer = compare_web_app(args.webapp, args.locales)
return observer
rv = 0
for observer in observers:
print observer.serialize(type=data).encode('utf-8', 'replace')
# summary is a dict of lang-summary dicts
# find out if any of our results has errors, return 1 if so
if rv > 0:
continue # we already have errors
for loc, summary in observer.summary.items():
if summary.get('errors', 0) > 0:
rv = 1
# no need to check further summaries, but
# continue to run through observers
break
return rv
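
Because argparse can't reliably split two adjacent variadic
positionals, handle() re-derives the groups itself: every leading
argument that is not an existing directory is a config file, the first
directory is the l10n base, and everything after it is a locale code.
A hypothetical invocation:

    compare-locales browser.toml mobile.toml ../l10n-central de it zh-TW
    # config_paths  -> ['browser.toml', 'mobile.toml']
    # l10n_base_dir -> '../l10n-central'
    # locales       -> ['de', 'it', 'zh-TW']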

View file

@ -6,10 +6,8 @@
import codecs
import os
import os.path
import shutil
import re
from difflib import SequenceMatcher
from collections import defaultdict
try:
@ -18,7 +16,7 @@ except:
from simplejson import dumps
from compare_locales import parser
from compare_locales import paths
from compare_locales import paths, mozpath
from compare_locales.checks import getChecker
@ -31,8 +29,10 @@ class Tree(object):
def __getitem__(self, leaf):
parts = []
if isinstance(leaf, paths.File):
parts = [p for p in [leaf.locale, leaf.module] if p] + \
leaf.file.split('/')
parts = [] if not leaf.locale else [leaf.locale]
if leaf.module:
parts += leaf.module.split('/')
parts += leaf.file.split('/')
else:
parts = leaf.split('/')
return self.__get(parts)
@ -93,16 +93,10 @@ class Tree(object):
Returns this Tree as a JSON-able tree of hashes.
Only the values need to take care that they're JSON-able.
'''
json = {}
keys = self.branches.keys()
keys.sort()
if self.value is not None:
json['value'] = self.value
children = [('/'.join(key), self.branches[key].toJSON())
for key in keys]
if children:
json['children'] = children
return json
return self.value
return dict(('/'.join(key), self.branches[key].toJSON())
for key in self.branches.keys())
def getStrRows(self):
def tostr(t):
@ -116,147 +110,129 @@ class Tree(object):
return '\n'.join(self.getStrRows())
class AddRemove(SequenceMatcher):
class AddRemove(object):
def __init__(self):
SequenceMatcher.__init__(self, None, None, None)
self.left = self.right = None
def set_left(self, left):
if not isinstance(left, list):
left = [l for l in left]
self.set_seq1(left)
left = list(l for l in left)
self.left = left
def set_right(self, right):
if not isinstance(right, list):
right = [l for l in right]
self.set_seq2(right)
right = list(l for l in right)
self.right = right
def __iter__(self):
for tag, i1, i2, j1, j2 in self.get_opcodes():
if tag == 'equal':
for pair in zip(self.a[i1:i2], self.b[j1:j2]):
yield ('equal', pair)
elif tag == 'delete':
for item in self.a[i1:i2]:
yield ('delete', item)
elif tag == 'insert':
for item in self.b[j1:j2]:
yield ('add', item)
# order_map stores index in left and then index in right
order_map = dict((item, (i, -1)) for i, item in enumerate(self.left))
left_items = set(order_map)
# as we go through the right side, keep track of which left
# item we had in right last, and for items not in left,
# set the sortmap to (left_offset, right_index)
left_offset = -1
right_items = set()
for i, item in enumerate(self.right):
right_items.add(item)
if item in order_map:
left_offset = order_map[item][0]
else:
# tag == 'replace'
for item in self.a[i1:i2]:
yield ('delete', item)
for item in self.b[j1:j2]:
yield ('add', item)
class DirectoryCompare(SequenceMatcher):
def __init__(self, reference):
SequenceMatcher.__init__(self, None, [i for i in reference],
[])
self.watcher = None
def setWatcher(self, watcher):
self.watcher = watcher
def compareWith(self, other):
if not self.watcher:
return
self.set_seq2([i for i in other])
for tag, i1, i2, j1, j2 in self.get_opcodes():
if tag == 'equal':
for i, j in zip(xrange(i1, i2), xrange(j1, j2)):
self.watcher.compare(self.a[i], self.b[j])
elif tag == 'delete':
for i in xrange(i1, i2):
self.watcher.add(self.a[i], other.cloneFile(self.a[i]))
elif tag == 'insert':
for j in xrange(j1, j2):
self.watcher.remove(self.b[j])
order_map[item] = (left_offset, i)
for item in sorted(order_map, key=lambda item: order_map[item]):
if item in left_items and item in right_items:
yield ('equal', item)
elif item in left_items:
yield ('delete', item)
else:
for j in xrange(j1, j2):
self.watcher.remove(self.b[j])
for i in xrange(i1, i2):
self.watcher.add(self.a[i], other.cloneFile(self.a[i]))
yield ('add', item)
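# A sketch of the iteration protocol, with hypothetical keys:
#   ar = AddRemove()
#   ar.set_left(['one', 'two', 'four'])
#   ar.set_right(['one', 'three', 'four'])
#   list(ar) -> [('equal', 'one'), ('add', 'three'),
#                ('delete', 'two'), ('equal', 'four')]
# Right-only items sort directly after the last shared item seen on the
# left, which keeps both inputs in order without SequenceMatcher.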
class Observer(object):
stat_cats = ['missing', 'obsolete', 'missingInFiles', 'report',
'changed', 'unchanged', 'keys']
def __init__(self):
class intdict(defaultdict):
def __init__(self):
defaultdict.__init__(self, int)
self.summary = defaultdict(intdict)
self.details = Tree(dict)
self.filter = None
def __init__(self, filter=None, file_stats=False):
self.summary = defaultdict(lambda: defaultdict(int))
self.details = Tree(list)
self.filter = filter
self.file_stats = None
if file_stats:
self.file_stats = defaultdict(lambda: defaultdict(dict))
# support pickling
def __getstate__(self):
return dict(summary=self.getSummary(), details=self.details)
state = dict(summary=self._dictify(self.summary), details=self.details)
if self.file_stats is not None:
state['file_stats'] = self._dictify(self.file_stats)
return state
def __setstate__(self, state):
class intdict(defaultdict):
def __init__(self):
defaultdict.__init__(self, int)
self.summary = defaultdict(intdict)
self.summary = defaultdict(lambda: defaultdict(int))
if 'summary' in state:
for loc, stats in state['summary'].iteritems():
self.summary[loc].update(stats)
self.file_stats = None
if 'file_stats' in state:
self.file_stats = defaultdict(lambda: defaultdict(dict))
for k, d in state['file_stats'].iteritems():
self.file_stats[k].update(d)
self.details = state['details']
self.filter = None
def getSummary(self):
def _dictify(self, d):
plaindict = {}
for k, v in self.summary.iteritems():
for k, v in d.iteritems():
plaindict[k] = dict(v)
return plaindict
def toJSON(self):
return dict(summary=self.getSummary(), details=self.details.toJSON())
# Don't export file stats, even if we collected them.
# Those are not part of the data we use toJSON for.
return {
'summary': self._dictify(self.summary),
'details': self.details.toJSON()
}
def updateStats(self, file, stats):
# in multi-project scenarios, this file might not be ours,
# check that.
# Pass in a dummy entity key '' to avoid getting in to
# generic file filters. If we have stats for those,
# we want to aggregate the counts
if (self.filter is not None and
self.filter(file, entity='') == 'ignore'):
return
for category, value in stats.iteritems():
self.summary[file.locale][category] += value
if self.file_stats is None:
return
if 'missingInFiles' in stats:
# keep track of how many strings are in a missing file
# we got the {'missingFile': 'error'} from the notify pass
self.details[file].append({'count': stats['missingInFiles']})
# missingInFiles should just be "missing" in file stats
self.file_stats[file.locale][file.localpath]['missing'] = \
stats['missingInFiles']
return # there are no other stats for missing files
self.file_stats[file.locale][file.localpath].update(stats)
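# The resulting file_stats nest by locale and local path, e.g. (sketch):
#   {'de': {'browser/menu.dtd': {'missing': 3, 'changed': 10}}}
# with 'missingInFiles' counts folded into plain 'missing'.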
def notify(self, category, file, data):
rv = "error"
if category in self.stat_cats:
# these get called post reporting just for stats
# return "error" to forward them to other other_observers
self.summary[file.locale][category] += data
# keep track of how many strings are in a missing file
# we got the {'missingFile': 'error'} from the first pass
if category == 'missingInFiles':
self.details[file]['strings'] = data
return "error"
rv = 'error'
if category in ['missingFile', 'obsoleteFile']:
if self.filter is not None:
rv = self.filter(file)
if rv != "ignore":
self.details[file][category] = rv
self.details[file].append({category: rv})
return rv
if category in ['missingEntity', 'obsoleteEntity']:
if self.filter is not None:
rv = self.filter(file, data)
if rv == "ignore":
return rv
v = self.details[file]
try:
v[category].append(data)
except KeyError:
v[category] = [data]
self.details[file].append({category: data})
return rv
if category == 'error':
try:
self.details[file][category].append(data)
except KeyError:
self.details[file][category] = [data]
self.summary[file.locale]['errors'] += 1
elif category == 'warning':
try:
self.details[file][category].append(data)
except KeyError:
self.details[file][category] = [data]
self.summary[file.locale]['warnings'] += 1
if category in ('error', 'warning'):
self.details[file].append({category: data})
self.summary[file.locale][category + 's'] += 1
return rv
def toExhibit(self):
@ -276,6 +252,9 @@ class Observer(object):
for k in ('changed', 'unchanged', 'report', 'missing',
'missingInFiles')
if k in summary])
total_w = sum([summary[k]
for k in ('changed_w', 'unchanged_w', 'missing_w')
if k in summary])
rate = (('changed' in summary and summary['changed'] * 100) or
0) / total
item.update((k, summary.get(k, 0))
@ -287,6 +266,9 @@ class Observer(object):
summary.get('missingInFiles', 0)
item['completion'] = rate
item['total'] = total
item.update((k, summary.get(k, 0))
for k in ('changed_w', 'unchanged_w', 'missing_w'))
item['total_w'] = total_w
result = 'success'
if item.get('warnings', 0):
result = 'warning'
@ -297,6 +279,7 @@ class Observer(object):
data = {
"properties": dict.fromkeys(
("completion", "errors", "warnings", "missing", "report",
"missing_w", "changed_w", "unchanged_w",
"unchanged", "changed", "obsolete"),
{"valueType": "number"}),
"types": {
@ -316,26 +299,19 @@ class Observer(object):
return ' ' * t[0] + '/'.join(t[2])
o = []
indent = ' ' * (t[0] + 1)
if 'error' in t[2]:
o += [indent + 'ERROR: ' + e for e in t[2]['error']]
if 'warning' in t[2]:
o += [indent + 'WARNING: ' + e for e in t[2]['warning']]
if 'missingEntity' in t[2] or 'obsoleteEntity' in t[2]:
missingEntities = ('missingEntity' in t[2] and
t[2]['missingEntity']) or []
obsoleteEntities = ('obsoleteEntity' in t[2] and
t[2]['obsoleteEntity']) or []
entities = missingEntities + obsoleteEntities
entities.sort()
for entity in entities:
op = '+'
if entity in obsoleteEntities:
op = '-'
o.append(indent + op + entity)
elif 'missingFile' in t[2]:
o.append(indent + '// add and localize this file')
elif 'obsoleteFile' in t[2]:
o.append(indent + '// remove this file')
for item in t[2]:
if 'error' in item:
o += [indent + 'ERROR: ' + item['error']]
elif 'warning' in item:
o += [indent + 'WARNING: ' + item['warning']]
elif 'missingEntity' in item:
o += [indent + '+' + item['missingEntity']]
elif 'obsoleteEntity' in item:
o += [indent + '-' + item['obsoleteEntity']]
elif 'missingFile' in item:
o.append(indent + '// add and localize this file')
elif 'obsoleteFile' in item:
o.append(indent + '// remove this file')
return '\n'.join(o)
out = []
@ -362,99 +338,121 @@ class ContentComparer:
keyRE = re.compile('[kK]ey')
nl = re.compile('\n', re.M)
def __init__(self):
def __init__(self, observers, stat_observers=None):
'''Create a ContentComparer.
observers is usually a list of Observer instances. The return values
of the notify method are used to control the handling of missing
entities.
'''
self.reference = dict()
self.observer = Observer()
self.other_observers = []
self.merge_stage = None
self.observers = observers
if stat_observers is None:
stat_observers = []
self.stat_observers = stat_observers
def add_observer(self, obs):
'''Add a non-filtering observer.
Results from the notify calls are ignored.
'''
self.other_observers.append(obs)
def set_merge_stage(self, merge_stage):
self.merge_stage = merge_stage
def merge(self, ref_entities, ref_map, ref_file, l10n_file, missing,
skips, ctx, canMerge, encoding):
outfile = os.path.join(self.merge_stage, l10n_file.module,
l10n_file.file)
outdir = os.path.dirname(outfile)
def create_merge_dir(self, merge_file):
outdir = mozpath.dirname(merge_file)
if not os.path.isdir(outdir):
os.makedirs(outdir)
if not canMerge:
shutil.copyfile(ref_file.fullpath, outfile)
print "copied reference to " + outfile
def merge(self, ref_entities, ref_map, ref_file, l10n_file, merge_file,
missing, skips, ctx, capabilities, encoding):
if capabilities == parser.CAN_NONE:
return
if capabilities & parser.CAN_COPY and (skips or missing):
self.create_merge_dir(merge_file)
shutil.copyfile(ref_file.fullpath, merge_file)
print "copied reference to " + merge_file
return
if not (capabilities & parser.CAN_SKIP):
return
# Start with None in case the merge file doesn't need to be created.
f = None
if skips:
# skips come in ordered by key name, we need them in file order
skips.sort(key=lambda s: s.span[0])
trailing = (['\n'] +
[ref_entities[ref_map[key]].all for key in missing] +
[ref_entities[ref_map[skip.key]].all for skip in skips
if not isinstance(skip, parser.Junk)])
if skips:
# we need to skip a few errornous blocks in the input, copy by hand
f = codecs.open(outfile, 'wb', encoding)
# we need to skip a few erroneous blocks in the input, copy by hand
self.create_merge_dir(merge_file)
f = codecs.open(merge_file, 'wb', encoding)
offset = 0
for skip in skips:
chunk = skip.span
f.write(ctx.contents[offset:chunk[0]])
offset = chunk[1]
f.write(ctx.contents[offset:])
else:
shutil.copyfile(l10n_file.fullpath, outfile)
f = codecs.open(outfile, 'ab', encoding)
print "adding to " + outfile
def ensureNewline(s):
if not s.endswith('\n'):
return s + '\n'
return s
if not (capabilities & parser.CAN_MERGE):
return
f.write(''.join(map(ensureNewline, trailing)))
f.close()
if skips or missing:
if f is None:
self.create_merge_dir(merge_file)
shutil.copyfile(l10n_file.fullpath, merge_file)
f = codecs.open(merge_file, 'ab', encoding)
trailing = (['\n'] +
[ref_entities[ref_map[key]].all for key in missing] +
[ref_entities[ref_map[skip.key]].all for skip in skips
if not isinstance(skip, parser.Junk)])
def ensureNewline(s):
if not s.endswith('\n'):
return s + '\n'
return s
print "adding to " + merge_file
f.write(''.join(map(ensureNewline, trailing)))
if f is not None:
f.close()
def notify(self, category, file, data):
"""Check observer for the found data, and if it's
not to ignore, notify other_observers.
not to ignore, notify stat_observers.
"""
rv = self.observer.notify(category, file, data)
if rv == 'ignore':
return rv
for obs in self.other_observers:
# non-filtering other_observers, ignore results
rvs = set(
observer.notify(category, file, data)
for observer in self.observers
)
if all(rv == 'ignore' for rv in rvs):
return 'ignore'
rvs.discard('ignore')
for obs in self.stat_observers:
# non-filtering stat_observers, ignore results
obs.notify(category, file, data)
return rv
if 'error' in rvs:
return 'error'
assert len(rvs) == 1
return rvs.pop()
def updateStats(self, file, stats):
"""Check observer for the found data, and if it's
not to ignore, notify stat_observers.
"""
for observer in self.observers + self.stat_observers:
observer.updateStats(file, stats)
def remove(self, obsolete):
self.notify('obsoleteFile', obsolete, None)
pass
def compare(self, ref_file, l10n):
def compare(self, ref_file, l10n, merge_file, extra_tests=None):
try:
p = parser.getParser(ref_file.file)
except UserWarning:
# no comparison, XXX report?
return
if ref_file not in self.reference:
# we didn't parse this before
try:
p.readContents(ref_file.getContents())
except Exception, e:
self.notify('error', ref_file, str(e))
return
self.reference[ref_file] = p.parse()
ref = self.reference[ref_file]
ref_list = ref[1].keys()
ref_list.sort()
try:
p.readContents(ref_file.getContents())
except Exception, e:
self.notify('error', ref_file, str(e))
return
ref_entities, ref_map = p.parse()
try:
p.readContents(l10n.getContents())
l10n_entities, l10n_map = p.parse()
@ -463,96 +461,100 @@ class ContentComparer:
self.notify('error', l10n, str(e))
return
l10n_list = l10n_map.keys()
l10n_list.sort()
ar = AddRemove()
ar.set_left(ref_list)
ar.set_right(l10n_list)
ar.set_left(e.key for e in ref_entities)
ar.set_right(e.key for e in l10n_entities)
report = missing = obsolete = changed = unchanged = keys = 0
missing_w = changed_w = unchanged_w = 0 # word stats
missings = []
skips = []
checker = getChecker(l10n, reference=ref[0])
for action, item_or_pair in ar:
checker = getChecker(l10n, extra_tests=extra_tests)
if checker and checker.needs_reference:
checker.set_reference(ref_entities)
for msg in p.findDuplicates(ref_entities):
self.notify('warning', l10n, msg)
for msg in p.findDuplicates(l10n_entities):
self.notify('error', l10n, msg)
for action, entity_id in ar:
if action == 'delete':
# missing entity
_rv = self.notify('missingEntity', l10n, item_or_pair)
if isinstance(ref_entities[ref_map[entity_id]], parser.Junk):
self.notify('warning', l10n, 'Parser error in en-US')
continue
_rv = self.notify('missingEntity', l10n, entity_id)
if _rv == "ignore":
continue
if _rv == "error":
# only add to missing entities for l10n-merge on error,
# not report
missings.append(item_or_pair)
missings.append(entity_id)
missing += 1
refent = ref_entities[ref_map[entity_id]]
missing_w += refent.count_words()
else:
# just report
report += 1
elif action == 'add':
# obsolete entity or junk
if isinstance(l10n_entities[l10n_map[item_or_pair]],
if isinstance(l10n_entities[l10n_map[entity_id]],
parser.Junk):
junk = l10n_entities[l10n_map[item_or_pair]]
junk = l10n_entities[l10n_map[entity_id]]
params = (junk.val,) + junk.position() + junk.position(-1)
self.notify('error', l10n,
'Unparsed content "%s" from line %d colum %d'
'Unparsed content "%s" from line %d column %d'
' to line %d column %d' % params)
if self.merge_stage is not None:
if merge_file is not None:
skips.append(junk)
elif self.notify('obsoleteEntity', l10n,
item_or_pair) != 'ignore':
entity_id) != 'ignore':
obsolete += 1
else:
# entity found in both ref and l10n, check for changed
entity = item_or_pair[0]
refent = ref[0][ref[1][entity]]
l10nent = l10n_entities[l10n_map[entity]]
if self.keyRE.search(entity):
refent = ref_entities[ref_map[entity_id]]
l10nent = l10n_entities[l10n_map[entity_id]]
if self.keyRE.search(entity_id):
keys += 1
else:
if refent.val == l10nent.val:
if refent.equals(l10nent):
self.doUnchanged(l10nent)
unchanged += 1
unchanged_w += refent.count_words()
else:
self.doChanged(ref_file, refent, l10nent)
changed += 1
changed_w += refent.count_words()
# run checks:
if checker:
for tp, pos, msg, cat in checker.check(refent, l10nent):
# compute real src position, if first line,
# col needs adjustment
if isinstance(pos, tuple):
_l, col = l10nent.value_position()
# line, column
if pos[0] == 1:
col = col + pos[1]
else:
col = pos[1]
_l += pos[0] - 1
else:
_l, col = l10nent.value_position(pos)
line, col = l10nent.value_position(pos)
# skip error entities when merging
if tp == 'error' and self.merge_stage is not None:
if tp == 'error' and merge_file is not None:
skips.append(l10nent)
self.notify(tp, l10n,
u"%s at line %d, column %d for %s" %
(msg, _l, col, refent.key))
(msg, line, col, refent.key))
pass
if missing:
self.notify('missing', l10n, missing)
if self.merge_stage is not None and (missings or skips):
if merge_file is not None:
self.merge(
ref[0], ref[1], ref_file,
l10n, missings, skips, l10n_ctx,
p.canMerge, p.encoding)
if report:
self.notify('report', l10n, report)
if obsolete:
self.notify('obsolete', l10n, obsolete)
if changed:
self.notify('changed', l10n, changed)
if unchanged:
self.notify('unchanged', l10n, unchanged)
if keys:
self.notify('keys', l10n, keys)
ref_entities, ref_map, ref_file,
l10n, merge_file, missings, skips, l10n_ctx,
p.capabilities, p.encoding)
stats = {}
for cat, value in (
('missing', missing),
('missing_w', missing_w),
('report', report),
('obsolete', obsolete),
('changed', changed),
('changed_w', changed_w),
('unchanged', unchanged),
('unchanged_w', unchanged_w),
('keys', keys)):
if value:
stats[cat] = value
self.updateStats(l10n, stats)
pass
def add(self, orig, missing):
@ -567,10 +569,16 @@ class ContentComparer:
try:
p.readContents(f.getContents())
entities, map = p.parse()
except Exception, e:
self.notify('error', f, str(e))
except Exception, ex:
self.notify('error', f, str(ex))
return
self.notify('missingInFiles', missing, len(map))
# strip parse errors
entities = [e for e in entities if not isinstance(e, parser.Junk)]
self.updateStats(missing, {'missingInFiles': len(entities)})
missing_w = 0
for e in entities:
missing_w += e.count_words()
self.updateStats(missing, {'missing_w': missing_w})
def doUnchanged(self, entity):
# overload this if needed
@ -581,52 +589,54 @@ class ContentComparer:
pass
def compareApp(app, other_observer=None, merge_stage=None, clobber=False):
'''Compare locales set in app.
Optional arguments are:
- other_observer. A object implementing
notify(category, _file, data)
The return values of that callback are ignored.
- merge_stage. A directory to be used for staging the output of
l10n-merge.
- clobber. Clobber the module subdirectories of the merge dir as we go.
Use wisely, as it might cause data loss.
'''
comparer = ContentComparer()
if other_observer is not None:
comparer.add_observer(other_observer)
comparer.observer.filter = app.filter
for module, reference, locales in app:
dir_comp = DirectoryCompare(reference)
dir_comp.setWatcher(comparer)
for _, localization in locales:
if merge_stage is not None:
locale_merge = merge_stage.format(ab_CD=localization.locale)
comparer.set_merge_stage(locale_merge)
if clobber:
# if clobber, remove the stage for the module if it exists
clobberdir = os.path.join(locale_merge, module)
def compareProjects(project_configs, stat_observer=None,
file_stats=False,
merge_stage=None, clobber_merge=False):
locales = set()
observers = []
for project in project_configs:
observers.append(
Observer(filter=project.filter, file_stats=file_stats))
locales.update(project.locales)
if stat_observer is not None:
stat_observers = [stat_observer]
else:
stat_observers = None
comparer = ContentComparer(observers, stat_observers=stat_observers)
for locale in sorted(locales):
files = paths.ProjectFiles(locale, project_configs,
mergebase=merge_stage)
root = mozpath.commonprefix([m['l10n'].prefix for m in files.matchers])
if merge_stage is not None:
if clobber_merge:
mergematchers = set(_m.get('merge') for _m in files.matchers)
mergematchers.discard(None)
for matcher in mergematchers:
clobberdir = matcher.prefix
if os.path.exists(clobberdir):
shutil.rmtree(clobberdir)
print "clobbered " + clobberdir
dir_comp.compareWith(localization)
return comparer.observer
def compareDirs(reference, locale, other_observer=None, merge_stage=None):
'''Compare reference and locale dir.
Optional arguments are:
- other_observer. A object implementing
notify(category, _file, data)
The return values of that callback are ignored.
'''
comparer = ContentComparer()
if other_observer is not None:
comparer.add_observer(other_observer)
comparer.set_merge_stage(merge_stage)
dir_comp = DirectoryCompare(paths.EnumerateDir(reference))
dir_comp.setWatcher(comparer)
dir_comp.compareWith(paths.EnumerateDir(locale))
return comparer.observer
for l10npath, refpath, mergepath, extra_tests in files:
# module and file path are needed for legacy filter.py support
module = None
fpath = mozpath.relpath(l10npath, root)
for _m in files.matchers:
if _m['l10n'].match(l10npath):
if _m['module']:
# legacy ini support, set module, and resolve
# local path against the matcher prefix,
# which includes the module
module = _m['module']
fpath = mozpath.relpath(l10npath, _m['l10n'].prefix)
break
reffile = paths.File(refpath, fpath or refpath, module=module)
l10n = paths.File(l10npath, fpath or l10npath,
module=module, locale=locale)
if not os.path.exists(l10npath):
comparer.add(reffile, l10n)
continue
if not os.path.exists(refpath):
comparer.remove(l10n)
continue
comparer.compare(reffile, l10n, mergepath, extra_tests)
return observers

137
third_party/python/compare-locales/compare_locales/mozpath.py vendored Normal file
View file

@ -0,0 +1,137 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import posixpath
import os
import re
'''
Like os.path, with a reduced set of functions, and with normalized path
separators (always use forward slashes).
Also contains a few additional utilities not found in os.path.
'''
def normsep(path):
'''
Normalize path separators, by using forward slashes instead of whatever
os.sep is.
'''
if os.sep != '/':
path = path.replace(os.sep, '/')
if os.altsep and os.altsep != '/':
path = path.replace(os.altsep, '/')
return path
def relpath(path, start):
rel = normsep(os.path.relpath(path, start))
return '' if rel == '.' else rel
def realpath(path):
return normsep(os.path.realpath(path))
def abspath(path):
return normsep(os.path.abspath(path))
def join(*paths):
return normsep(os.path.join(*paths))
def normpath(path):
return posixpath.normpath(normsep(path))
def dirname(path):
return posixpath.dirname(normsep(path))
def commonprefix(paths):
return posixpath.commonprefix([normsep(path) for path in paths])
def basename(path):
return os.path.basename(path)
def splitext(path):
return posixpath.splitext(normsep(path))
def split(path):
'''
Return the normalized path as a list of its components.
split('foo/bar/baz') returns ['foo', 'bar', 'baz']
'''
return normsep(path).split('/')
def basedir(path, bases):
'''
Given a list of directories (bases), return which one contains the given
path. If several matches are found, the deepest base directory is returned.
basedir('foo/bar/baz', ['foo', 'baz', 'foo/bar']) returns 'foo/bar'
('foo' and 'foo/bar' both match, but 'foo/bar' is the deepest match)
'''
path = normsep(path)
bases = [normsep(b) for b in bases]
if path in bases:
return path
for b in sorted(bases, reverse=True):
if b == '' or path.startswith(b + '/'):
return b
re_cache = {}
def match(path, pattern):
'''
Return whether the given path matches the given pattern.
An asterisk can be used to match any string, including the null string, in
one part of the path:
'foo' matches '*', 'f*' or 'fo*o'
However, an asterisk matching a subdirectory may not match the null string:
'foo/bar' does *not* match 'foo/*/bar'
If the pattern matches one of the ancestor directories of the path, the
path is considered matching:
'foo/bar' matches 'foo'
Two adjacent asterisks can be used to match files and zero or more
directories and subdirectories.
'foo/bar' matches 'foo/**/bar', or '**/bar'
'''
if not pattern:
return True
if pattern not in re_cache:
p = re.escape(pattern)
p = re.sub(r'(^|\\\/)\\\*\\\*\\\/', r'\1(?:.+/)?', p)
p = re.sub(r'(^|\\\/)\\\*\\\*$', r'(?:\1.+)?', p)
p = p.replace(r'\*', '[^/]*') + '(?:/.*)?$'
re_cache[pattern] = re.compile(p)
return re_cache[pattern].match(path) is not None
def rebase(oldbase, base, relativepath):
'''
Return relativepath relative to base instead of oldbase.
'''
if base == oldbase:
return relativepath
if len(base) < len(oldbase):
assert basedir(oldbase, [base]) == base
relbase = relpath(oldbase, base)
result = join(relbase, relativepath)
else:
assert basedir(base, [oldbase]) == oldbase
relbase = relpath(base, oldbase)
result = relpath(relativepath, relbase)
result = normpath(result)
if relativepath.endswith('/') and not result.endswith('/'):
result += '/'
return result
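
rebase is the one helper here without examples in its docstring; a
quick sketch with made-up paths:

    from compare_locales import mozpath

    mozpath.rebase('l10n/de', 'l10n', 'browser/foo.dtd')
    # -> 'de/browser/foo.dtd'
    mozpath.rebase('l10n', 'l10n/de', 'de/browser/foo.dtd')
    # -> 'browser/foo.dtd'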

View file

@ -5,11 +5,28 @@
import re
import bisect
import codecs
from collections import Counter
import logging
from fluent.syntax import FluentParser as FTLParser
from fluent.syntax import ast as ftl
__constructors = []
# The allowed capabilities for the Parsers. They define the exact strategy
# used by ContentComparer.merge.
# Don't perform any merging
CAN_NONE = 0
# Copy the entire reference file
CAN_COPY = 1
# Remove broken entities from localization
CAN_SKIP = 2
# Add missing and broken entities from the reference to localization
CAN_MERGE = 4
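# How ContentComparer.merge reads these bit flags, roughly:
#   CAN_NONE                 -> bail out, nothing to stage
#   capabilities & CAN_COPY  -> copy the reference file wholesale
#   capabilities & CAN_SKIP  -> rewrite the l10n file without broken
#                               entities
#   capabilities & CAN_MERGE -> also append missing entities taken from
#                               the reference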
class EntityBase(object):
'''
Abstraction layer for a localizable entity.
@ -25,7 +42,7 @@ class EntityBase(object):
<-------[2]--------->
'''
def __init__(self, ctx, pp, pre_comment,
def __init__(self, ctx, pre_comment,
span, pre_ws_span, def_span,
key_span, val_span, post_span):
self.ctx = ctx
@ -35,7 +52,6 @@ class EntityBase(object):
self.key_span = key_span
self.val_span = val_span
self.post_span = post_span
self.pp = pp
self.pre_comment = pre_comment
pass
@ -77,9 +93,6 @@ class EntityBase(object):
def get_key(self):
return self.ctx.contents[self.key_span[0]:self.key_span[1]]
def get_val(self):
return self.pp(self.ctx.contents[self.val_span[0]:self.val_span[1]])
def get_raw_val(self):
return self.ctx.contents[self.val_span[0]:self.val_span[1]]
@ -92,13 +105,27 @@ class EntityBase(object):
pre_ws = property(get_pre_ws)
definition = property(get_def)
key = property(get_key)
val = property(get_val)
val = property(get_raw_val)
raw_val = property(get_raw_val)
post = property(get_post)
def __repr__(self):
return self.key
re_br = re.compile('<br\s*/?>', re.U)
re_sgml = re.compile('</?\w+.*?>', re.U | re.M)
def count_words(self):
"""Count the words in an English string.
Strip a couple of common XML markup constructs first to make that safer.
"""
value = self.re_br.sub(u'\n', self.val)
value = self.re_sgml.sub(u'', value)
return len(value.split())
def equals(self, other):
return self.key == other.key and self.val == other.val
class Entity(EntityBase):
pass
@ -112,7 +139,6 @@ class Comment(EntityBase):
self.pre_ws_span = pre_ws_span
self.def_span = def_span
self.post_span = post_span
self.pp = lambda v: v
@property
def key(self):
@ -174,14 +200,13 @@ class Whitespace(EntityBase):
self.key_span = self.val_span = self.span = span
self.def_span = self.pre_ws_span = (span[0], span[0])
self.post_span = (span[1], span[1])
self.pp = lambda v: v
def __repr__(self):
return self.raw_val
class Parser:
canMerge = True
class Parser(object):
capabilities = CAN_SKIP | CAN_MERGE
tail = re.compile('\s+\Z')
class Context(object):
@ -233,9 +258,6 @@ class Parser:
l.append(e)
return (l, m)
def postProcessValue(self, val):
return val
def __iter__(self):
return self.walk(onlyEntities=True)
@ -249,7 +271,7 @@ class Parser:
entity, offset = self.getEntity(ctx, offset)
while entity:
if (not onlyEntities or
type(entity) is Entity or
isinstance(entity, Entity) or
type(entity) is Junk):
yield entity
entity, offset = self.getEntity(ctx, offset)
@ -284,11 +306,18 @@ class Parser:
return (Junk(ctx, (offset, junkend)), junkend)
def createEntity(self, ctx, m):
pre_comment = unicode(self.last_comment) if self.last_comment else ''
self.last_comment = ''
return Entity(ctx, self.postProcessValue, pre_comment,
pre_comment = self.last_comment
self.last_comment = None
return Entity(ctx, pre_comment,
*[m.span(i) for i in xrange(6)])
@classmethod
def findDuplicates(cls, entities):
found = Counter(entity.key for entity in entities)
for entity_id, cnt in found.items():
if cnt > 1:
yield '{} occurs {} times'.format(entity_id, cnt)
def getParser(path):
for item in __constructors:
@ -311,6 +340,22 @@ def getParser(path):
# <-------[3]---------><------[6]------>
class DTDEntity(Entity):
def value_position(self, offset=0):
# DTDChecker already returns tuples of (line, col) positions
if isinstance(offset, tuple):
line_pos, col_pos = offset
line, col = super(DTDEntity, self).value_position()
if line_pos == 1:
col = col + col_pos
else:
col = col_pos
line += line_pos - 1
return line, col
else:
return super(DTDEntity, self).value_position(offset)
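# Example with hypothetical spans: if the value starts at line 12,
# column 8, then
#   value_position((1, 3)) -> (12, 11)  # first line: columns add up
#   value_position((3, 3)) -> (14, 3)   # later line: column is absolute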
class DTDParser(Parser):
# http://www.w3.org/TR/2006/REC-xml11-20060816/#NT-NameStartChar
# ":" | [A-Z] | "_" | [a-z] |
@ -357,28 +402,41 @@ class DTDParser(Parser):
m = self.rePE.match(ctx.contents, offset)
if m:
inneroffset = m.end()
self.last_comment = ''
entity = Entity(ctx, self.postProcessValue, '',
*[m.span(i) for i in xrange(6)])
self.last_comment = None
entity = DTDEntity(ctx, '', *[m.span(i) for i in xrange(6)])
return (entity, inneroffset)
def createEntity(self, ctx, m):
valspan = m.span('val')
valspan = (valspan[0]+1, valspan[1]-1)
pre_comment = unicode(self.last_comment) if self.last_comment else ''
self.last_comment = ''
return Entity(ctx, self.postProcessValue, pre_comment,
m.span(),
m.span('pre'),
m.span('entity'), m.span('key'), valspan,
m.span('post'))
pre_comment = self.last_comment
self.last_comment = None
return DTDEntity(ctx, pre_comment,
m.span(),
m.span('pre'),
m.span('entity'), m.span('key'), valspan,
m.span('post'))
class PropertiesParser(Parser):
class PropertiesEntity(Entity):
escape = re.compile(r'\\((?P<uni>u[0-9a-fA-F]{1,4})|'
'(?P<nl>\n\s*)|(?P<single>.))', re.M)
known_escapes = {'n': '\n', 'r': '\r', 't': '\t', '\\': '\\'}
@property
def val(self):
def unescape(m):
found = m.groupdict()
if found['uni']:
return unichr(int(found['uni'][1:], 16))
if found['nl']:
return ''
return self.known_escapes.get(found['single'], found['single'])
return self.escape.sub(unescape, self.raw_val)
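# A sketch of the unescaping, with hypothetical raw values:
#   r'\u0041'          -> u'A'  (unicode escape)
#   backslash + newline + indent -> ''  (line continuation swallowed)
#   r'\n', r'\t'       -> newline, tab  (known single-char escapes)
#   r'\x'              -> 'x'  (unknown escapes just drop the backslash;
#                               PropertiesChecker warns about these)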
class PropertiesParser(Parser):
def __init__(self):
self.reKey = re.compile('^(\s*)'
'([^#!\s\n][^=:\n]*?)\s*[:=][ \t]*', re.M)
@ -424,31 +482,19 @@ class PropertiesParser(Parser):
if ws:
endval = ws.start()
offset = ws.end()
pre_comment = (unicode(self.last_comment) if self.last_comment
else '')
self.last_comment = ''
entity = Entity(ctx, self.postProcessValue, pre_comment,
(m.start(), offset), # full span
m.span(1), # leading whitespan
(m.start(2), offset), # entity def span
m.span(2), # key span
(m.end(), endval), # value span
(offset, offset)) # post comment span, empty
pre_comment = self.last_comment
self.last_comment = None
entity = PropertiesEntity(
ctx, pre_comment,
(m.start(), offset), # full span
m.span(1), # leading whitespan
(m.start(2), offset), # entity def span
m.span(2), # key span
(m.end(), endval), # value span
(offset, offset)) # post comment span, empty
return (entity, offset)
return self.getTrailing(ctx, offset, self.reKey, self.reComment)
def postProcessValue(self, val):
def unescape(m):
found = m.groupdict()
if found['uni']:
return unichr(int(found['uni'][1:], 16))
if found['nl']:
return ''
return self.known_escapes.get(found['single'], found['single'])
val = self.escape.sub(unescape, val)
return val
class DefinesInstruction(EntityBase):
'''Entity-like object representing processing instructions in inc files
@ -460,7 +506,6 @@ class DefinesInstruction(EntityBase):
self.def_span = def_span
self.key_span = self.val_span = val_span
self.post_span = post_span
self.pp = lambda v: v
def __repr__(self):
return self.raw_val
@ -468,7 +513,7 @@ class DefinesInstruction(EntityBase):
class DefinesParser(Parser):
# can't merge, #unfilter needs to be the last item, which we don't support
canMerge = False
capabilities = CAN_COPY
tail = re.compile(r'(?!)') # never match
def __init__(self):
@ -516,7 +561,6 @@ class IniSection(EntityBase):
self.def_span = def_span
self.key_span = self.val_span = val_span
self.post_span = post_span
self.pp = lambda v: v
def __repr__(self):
return self.raw_val
@ -566,7 +610,108 @@ class IniParser(Parser):
self.reComment, self.reSection, self.reKey)
class FluentAttribute(EntityBase):
ignored_fields = ['span']
def __init__(self, entity, attr_node):
self.ctx = entity.ctx
self.attr = attr_node
self.key_span = (attr_node.id.span.start, attr_node.id.span.end)
self.val_span = (attr_node.value.span.start, attr_node.value.span.end)
def equals(self, other):
if not isinstance(other, FluentAttribute):
return False
return self.attr.equals(
other.attr, ignored_fields=self.ignored_fields)
class FluentEntity(Entity):
# Fields ignored when comparing two entities.
ignored_fields = ['comment', 'span', 'tags']
def __init__(self, ctx, entry):
start = entry.span.start
end = entry.span.end
self.ctx = ctx
self.span = (start, end)
self.key_span = (entry.id.span.start, entry.id.span.end)
if entry.value is not None:
self.val_span = (entry.value.span.start, entry.value.span.end)
else:
self.val_span = (0, 0)
self.entry = entry
_word_count = None
def count_words(self):
if self._word_count is None:
self._word_count = 0
def count_words(node):
if isinstance(node, ftl.TextElement):
self._word_count += len(node.value.split())
return node
self.entry.traverse(count_words)
return self._word_count
def equals(self, other):
return self.entry.equals(
other.entry, ignored_fields=self.ignored_fields)
# Positions yielded by FluentChecker.check are absolute offsets from the
# beginning of the file. This is different from the base Checker behavior
# which yields offsets from the beginning of the current entity's value.
def position(self, pos=None):
if pos is None:
pos = self.entry.span.start
return self.ctx.lines(pos)[0]
# FluentEntities don't differentiate between entity and value positions
# because all positions are absolute from the beginning of the file.
def value_position(self, pos=None):
return self.position(pos)
@property
def attributes(self):
for attr_node in self.entry.attributes:
yield FluentAttribute(self, attr_node)
class FluentParser(Parser):
capabilities = CAN_SKIP
def __init__(self):
super(FluentParser, self).__init__()
self.ftl_parser = FTLParser()
def walk(self, onlyEntities=False):
if not self.ctx:
# loading file failed, or we just didn't load anything
return
resource = self.ftl_parser.parse(self.ctx.contents)
for entry in resource.body:
if isinstance(entry, ftl.Message):
yield FluentEntity(self.ctx, entry)
elif isinstance(entry, ftl.Junk):
start = entry.span.start
end = entry.span.end
# strip leading whitespace
start += re.match('\s*', entry.content).end()
# strip trailing whitespace
ws, we = re.search('\s*$', entry.content).span()
end -= we - ws
yield Junk(self.ctx, (start, end))
__constructors = [('\\.dtd$', DTDParser()),
('\\.properties$', PropertiesParser()),
('\\.ini$', IniParser()),
('\\.inc$', DefinesParser())]
('\\.inc$', DefinesParser()),
('\\.ftl$', FluentParser())]
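
A short usage sketch tying getParser, parse and the new val/raw_val
split together (file name and content invented):

    from compare_locales import parser

    p = parser.getParser('toolkit.properties')  # -> PropertiesParser
    p.readContents('key = caf\\u00e9\n')
    entities, emap = p.parse()
    entity = entities[emap['key']]
    entity.raw_val   # 'caf\\u00e9' -- escapes kept verbatim
    entity.val       # u'caf\xe9' -- escapes resolved on access
    bool(p.capabilities & parser.CAN_MERGE)  # True for .properties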

View file

@ -2,14 +2,517 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os.path
import os
import re
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from urlparse import urlparse, urljoin
from urllib import pathname2url, url2pathname
from urllib2 import urlopen
from collections import defaultdict
from compare_locales import util
import errno
import itertools
import logging
from compare_locales import util, mozpath
import pytoml as toml
class Matcher(object):
'''Path pattern matcher
Supports path matching similar to mozpath.match(), but does
not match trailing file paths without trailing wildcards.
Also gets a prefix, which is the path before the first wildcard,
which is good for filesystem iterations, and allows to replace
the own matches in a path on a different Matcher. compare-locales
uses that to transform l10n and en-US paths back and forth.
'''
def __init__(self, pattern):
'''Create regular expression similar to mozpath.match().
'''
prefix = pattern.split("*", 1)[0]
p = re.escape(pattern)
p = re.sub(r'(^|\\\/)\\\*\\\*\\\/', r'\1(.+/)?', p)
p = re.sub(r'(^|\\\/)\\\*\\\*$', r'(\1.+)?', p)
p = p.replace(r'\*', '([^/]*)') + '$'
r = re.escape(pattern)
r = re.sub(r'(^|\\\/)\\\*\\\*\\\/', r'\\\\0', r)
r = re.sub(r'(^|\\\/)\\\*\\\*$', r'\\\\0', r)
r = r.replace(r'\*', r'\\0')
backref = itertools.count(1)
r = re.sub(r'\\0', lambda m: '\\%s' % backref.next(), r)
r = re.sub(r'\\(.)', r'\1', r)
self.prefix = prefix
self.regex = re.compile(p)
self.placable = r
def match(self, path):
'''
True if the given path matches the file pattern.
'''
return self.regex.match(path) is not None
def sub(self, other, path):
'''
Replace the wildcard matches in this pattern into the
pattern of the other Match object.
'''
if not self.match(path):
return None
return self.regex.sub(other.placable, path)
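# A sketch with hypothetical patterns: wildcard matches carry over from
# one Matcher into another via sub(), which is how l10n paths are
# turned back into reference paths (and vice versa):
#   l10n = Matcher('l10n/de/browser/**')
#   ref = Matcher('browser/locales/en-US/**')
#   l10n.prefix                              -> 'l10n/de/browser/'
#   l10n.match('l10n/de/browser/menu.dtd')   -> True
#   l10n.sub(ref, 'l10n/de/browser/menu.dtd')
#                              -> 'browser/locales/en-US/menu.dtd'
# Both patterns need the same wildcard structure for sub() to line up.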
class ProjectConfig(object):
'''Abstraction of l10n project configuration data.
'''
def __init__(self):
self.filter_py = None # legacy filter code
# {
# 'l10n': pattern,
# 'reference': pattern, # optional
# 'locales': [], # optional
# 'test': [], # optional
# }
self.paths = []
self.rules = []
self.locales = []
self.environ = {}
self.children = []
self._cache = None
variable = re.compile('{\s*([\w]+)\s*}')
def expand(self, path, env=None):
if env is None:
env = {}
def _expand(m):
_var = m.group(1)
for _env in (env, self.environ):
if _var in _env:
return self.expand(_env[_var], env)
return '{{{}}}'.format(_var)
return self.variable.sub(_expand, path)
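# e.g. (sketch): with self.environ == {'l10n_base': '/l10n'},
#   self.expand('{l10n_base}/{locale}/**', {'locale': 'de'})
#     -> '/l10n/de/**'
# Unknown variables are left in place as '{var}'.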
def lazy_expand(self, pattern):
def lazy_l10n_expanded_pattern(env):
return Matcher(self.expand(pattern, env))
return lazy_l10n_expanded_pattern
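
A small sketch of the expansion rules, with hypothetical values: variables resolve first against the per-call env, then against the config's own environment, and unknown variables are left intact:

pc = ProjectConfig()
pc.add_environment(l10n_base='/data/l10n')
path = pc.expand('{l10n_base}/{locale}/browser/**', env={'locale': 'de'})
assert path == '/data/l10n/de/browser/**'
assert pc.expand('{unknown}') == '{unknown}'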
def add_global_environment(self, **kwargs):
self.add_environment(**kwargs)
for child in self.children:
child.add_global_environment(**kwargs)
def add_environment(self, **kwargs):
self.environ.update(kwargs)
def add_paths(self, *paths):
'''Add path dictionaries to this config.
The dictionaries must have an `l10n` key. For monolingual files,
`reference` is also required.
An optional key `test` is allowed to enable additional tests for this
path pattern.
'''
for d in paths:
rv = {
'l10n': self.lazy_expand(d['l10n']),
'module': d.get('module')
}
if 'reference' in d:
rv['reference'] = Matcher(d['reference'])
if 'test' in d:
rv['test'] = d['test']
if 'locales' in d:
rv['locales'] = d['locales'][:]
self.paths.append(rv)
def set_filter_py(self, filter):
'''Set legacy filter.py code.
Assert that no rules are set.
Also, normalize the returned values here.
'''
assert not self.rules
def filter_(module, path, entity=None):
try:
rv = filter(module, path, entity=entity)
except:
return 'error'
rv = {
True: 'error',
False: 'ignore',
'report': 'warning'
}.get(rv, rv)
assert rv in ('error', 'ignore', 'warning', None)
return rv
self.filter_py = filter_
def add_rules(self, *rules):
'''Add rules to filter on.
Assert that there's no legacy filter.py code hooked up.
'''
assert self.filter_py is None
for rule in rules:
self.rules.extend(self._compile_rule(rule))
def add_child(self, child):
self.children.append(child)
def set_locales(self, locales, deep=False):
self.locales = locales
for child in self.children:
if not child.locales or deep:
child.set_locales(locales, deep=True)
else:
locs = [loc for loc in locales if loc in child.locales]
child.set_locales(locs)
@property
def configs(self):
'Recursively get all configs in this project and its children'
yield self
for child in self.children:
for config in child.configs:
yield config
def filter(self, l10n_file, entity=None):
'''Filter a localization file or entities within, according to
this configuration file.'''
if self.filter_py is not None:
return self.filter_py(l10n_file.module, l10n_file.file,
entity=entity)
rv = self._filter(l10n_file, entity=entity)
if rv is None:
return 'ignore'
return rv
class FilterCache(object):
def __init__(self, locale):
self.locale = locale
self.rules = []
self.l10n_paths = []
def cache(self, locale):
if self._cache and self._cache.locale == locale:
return self._cache
self._cache = self.FilterCache(locale)
for paths in self.paths:
self._cache.l10n_paths.append(paths['l10n']({
"locale": locale
}))
for rule in self.rules:
cached_rule = rule.copy()
cached_rule['path'] = rule['path']({
"locale": locale
})
self._cache.rules.append(cached_rule)
return self._cache
def _filter(self, l10n_file, entity=None):
actions = set(
child._filter(l10n_file, entity=entity)
for child in self.children)
if 'error' in actions:
# return early if we know we'll error
return 'error'
cached = self.cache(l10n_file.locale)
if any(p.match(l10n_file.fullpath) for p in cached.l10n_paths):
action = 'error'
for rule in reversed(cached.rules):
if not rule['path'].match(l10n_file.fullpath):
continue
if ('key' in rule) ^ (entity is not None):
# key/file mismatch, not a matching rule
continue
if 'key' in rule and not rule['key'].match(entity):
continue
action = rule['action']
break
actions.add(action)
if 'error' in actions:
return 'error'
if 'warning' in actions:
return 'warning'
if 'ignore' in actions:
return 'ignore'
def _compile_rule(self, rule):
assert 'path' in rule
if isinstance(rule['path'], list):
for path in rule['path']:
_rule = rule.copy()
_rule['path'] = self.lazy_expand(path)
for __rule in self._compile_rule(_rule):
yield __rule
return
if isinstance(rule['path'], basestring):
rule['path'] = self.lazy_expand(rule['path'])
if 'key' not in rule:
yield rule
return
if not isinstance(rule['key'], basestring):
for key in rule['key']:
_rule = rule.copy()
_rule['key'] = key
for __rule in self._compile_rule(_rule):
yield __rule
return
rule = rule.copy()
key = rule['key']
if key.startswith('re:'):
key = key[3:]
else:
key = re.escape(key) + '$'
rule['key'] = re.compile(key)
yield rule
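
To make the fan-out concrete, a hypothetical rule with a list-valued key compiles into one rule per key; plain keys are escaped and anchored, while the re: prefix passes the rest through as a regular expression:

pc = ProjectConfig()
pc.add_rules({
    'path': '{l10n_base}/{locale}/browser/**',
    'key': ['plainKey', 're:fo+'],
    'action': 'ignore',
})
assert len(pc.rules) == 2  # one compiled rule per key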
class ProjectFiles(object):
'''Iterable object to get all files and tests for a locale and a
list of ProjectConfigs.
'''
def __init__(self, locale, projects, mergebase=None):
self.locale = locale
self.matchers = []
self.mergebase = mergebase
configs = []
for project in projects:
configs.extend(project.configs)
for pc in configs:
if locale not in pc.locales:
continue
for paths in pc.paths:
if 'locales' in paths and locale not in paths['locales']:
continue
m = {
'l10n': paths['l10n']({
"locale": locale
}),
'module': paths.get('module'),
}
if 'reference' in paths:
m['reference'] = paths['reference']
if self.mergebase is not None:
m['merge'] = paths['l10n']({
"locale": locale,
"l10n_base": self.mergebase
})
m['test'] = set(paths.get('test', []))
if 'locales' in paths:
m['locales'] = paths['locales'][:]
self.matchers.append(m)
self.matchers.reverse() # we always iterate last first
# Remove duplicate patterns, comparing each matcher
# against all other matchers.
# Avoid n^2 comparisons by only scanning the upper triangle
# of an n x n matrix of all possible combinations.
# Using enumerate and keeping track of indexes, as we can't
# modify the list while iterating over it.
drops = set() # duplicate matchers to remove
for i, m in enumerate(self.matchers[:-1]):
if i in drops:
continue # we're dropping this anyway, don't search again
for i_, m_ in enumerate(self.matchers[(i+1):]):
if (mozpath.realpath(m['l10n'].prefix) !=
mozpath.realpath(m_['l10n'].prefix)):
# ok, not the same thing, continue
continue
# check that we're comparing the same thing
if 'reference' in m:
if (mozpath.realpath(m['reference'].prefix) !=
mozpath.realpath(m_.get('reference').prefix)):
raise RuntimeError('Mismatch in reference for ' +
mozpath.realpath(m['l10n'].prefix))
drops.add(i_ + i + 1)
m['test'] |= m_['test']
drops = sorted(drops, reverse=True)
for i in drops:
del self.matchers[i]
def __iter__(self):
known = {}
for matchers in self.matchers:
matcher = matchers['l10n']
for path in self._files(matcher):
if path not in known:
known[path] = {'test': matchers.get('test')}
if 'reference' in matchers:
known[path]['reference'] = matcher.sub(
matchers['reference'], path)
if 'merge' in matchers:
known[path]['merge'] = matcher.sub(
matchers['merge'], path)
if 'reference' not in matchers:
continue
matcher = matchers['reference']
for path in self._files(matcher):
l10npath = matcher.sub(matchers['l10n'], path)
if l10npath not in known:
known[l10npath] = {
'reference': path,
'test': matchers.get('test')
}
if 'merge' in matchers:
known[l10npath]['merge'] = \
matcher.sub(matchers['merge'], path)
for path, d in sorted(known.items()):
yield (path, d.get('reference'), d.get('merge'), d['test'])
def _files(self, matcher):
'''Base implementation of getting all files in a hierarchy
using the file system.
Subclasses might replace this method to support different IO
patterns.
'''
base = matcher.prefix
if os.path.isfile(base):
if matcher.match(base):
yield base
return
for d, dirs, files in os.walk(base):
for f in files:
p = mozpath.join(d, f)
if matcher.match(p):
yield p
def match(self, path):
'''Return the tuple of l10n_path, reference, mergepath, tests
if the given path matches any config, otherwise None.
This routine doesn't check that the files actually exist.
'''
for matchers in self.matchers:
matcher = matchers['l10n']
if matcher.match(path):
ref = merge = None
if 'reference' in matchers:
ref = matcher.sub(matchers['reference'], path)
if 'merge' in matchers:
merge = matcher.sub(matchers['merge'], path)
return path, ref, merge, matchers.get('test')
if 'reference' not in matchers:
continue
matcher = matchers['reference']
if matcher.match(path):
merge = None
l10n = matcher.sub(matchers['l10n'], path)
if 'merge' in matchers:
merge = matcher.sub(matchers['merge'], path)
return l10n, path, merge, matchers.get('test')
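
Putting the two lookups together, a sketch of the consumer side, assuming config is a ProjectConfig that contains 'de' and the paths are hypothetical: iteration yields one (l10n, reference, merge, tests) tuple per file, and match() answers the same question for a single path:

files = ProjectFiles('de', [config], mergebase='/tmp/merge')
for l10n_path, ref_path, merge_path, tests in files:
    pass  # e.g. feed each tuple into ContentComparer.compare(...)
# or, for one known path:
hit = files.match('/data/l10n/de/browser/menu.ftl')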
class ConfigNotFound(EnvironmentError):
def __init__(self, path):
super(ConfigNotFound, self).__init__(
errno.ENOENT,
'Configuration file not found',
path)
class TOMLParser(object):
@classmethod
def parse(cls, path, env=None, ignore_missing_includes=False):
parser = cls(path, env=env,
ignore_missing_includes=ignore_missing_includes)
parser.load()
parser.processEnv()
parser.processPaths()
parser.processFilters()
parser.processIncludes()
parser.processLocales()
return parser.asConfig()
def __init__(self, path, env=None, ignore_missing_includes=False):
self.path = path
self.env = env if env is not None else {}
self.ignore_missing_includes = ignore_missing_includes
self.data = None
self.pc = ProjectConfig()
self.pc.PATH = path
def load(self):
try:
with open(self.path, 'rb') as fin:
self.data = toml.load(fin)
except:
raise ConfigNotFound(self.path)
def processEnv(self):
assert self.data is not None
self.pc.add_environment(**self.data.get('env', {}))
def processLocales(self):
assert self.data is not None
if 'locales' in self.data:
self.pc.set_locales(self.data['locales'])
def processPaths(self):
assert self.data is not None
for data in self.data.get('paths', []):
l10n = data['l10n']
if not l10n.startswith('{'):
# l10n isn't relative to a variable, expand
l10n = self.resolvepath(l10n)
paths = {
"l10n": l10n,
}
if 'locales' in data:
paths['locales'] = data['locales']
if 'reference' in data:
paths['reference'] = self.resolvepath(data['reference'])
self.pc.add_paths(paths)
def processFilters(self):
assert self.data is not None
for data in self.data.get('filters', []):
paths = data['path']
if isinstance(paths, basestring):
paths = [paths]
# expand if path isn't relative to a variable
paths = [
self.resolvepath(path) if not path.startswith('{')
else path
for path in paths
]
rule = {
"path": paths,
"action": data['action']
}
if 'key' in data:
rule['key'] = data['key']
self.pc.add_rules(rule)
def processIncludes(self):
assert self.data is not None
if 'includes' not in self.data:
return
for include in self.data['includes']:
p = include['path']
p = self.resolvepath(p)
try:
child = self.parse(
p, env=self.env,
ignore_missing_includes=self.ignore_missing_includes
)
except ConfigNotFound as e:
if not self.ignore_missing_includes:
raise
(logging
.getLogger('compare-locales.io')
.error('%s: %s', e.strerror, e.filename))
continue
self.pc.add_child(child)
def resolvepath(self, path):
path = self.pc.expand(path, env=self.env)
path = mozpath.join(
mozpath.dirname(self.path),
self.data.get('basepath', '.'),
path)
return mozpath.normpath(path)
def asConfig(self):
return self.pc
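
For reference, a hypothetical l10n.toml covering the keys the parser reads (basepath, env, locales, [[paths]], [[filters]]; [[includes]] works the same way); TOMLParser.parse(path) turns a file like this into a ProjectConfig:

basepath = "."
locales = ["de", "fr"]

[env]
l10n_base = "/data/l10n"

[[paths]]
reference = "browser/locales/en-US/**"
l10n = "{l10n_base}/{locale}/browser/**"

[[filters]]
path = "{l10n_base}/{locale}/browser/defines.inc"
action = "ignore"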
class L10nConfigParser(object):
@ -25,11 +528,7 @@ class L10nConfigParser(object):
Optional keyword arguments are forwarded to the inner ConfigParser as
defaults.
"""
if os.path.isabs(inipath):
self.inipath = 'file:%s' % pathname2url(inipath)
else:
pwdurl = 'file:%s/' % pathname2url(os.getcwd())
self.inipath = urljoin(pwdurl, inipath)
self.inipath = mozpath.normpath(inipath)
# l10n.ini files can import other l10n.ini files, store the
# corresponding L10nConfigParsers
self.children = []
@ -40,8 +539,6 @@ class L10nConfigParser(object):
def getDepth(self, cp):
'''Get the depth for the comparison from the parsed l10n.ini.
Overloadable to get the source depth for fennec and friends.
'''
try:
depth = cp.get('general', 'depth')
@ -55,10 +552,10 @@ class L10nConfigParser(object):
Only works with synchronous loads, used by compare-locales, which
is local anyway.
'''
filterurl = urljoin(self.inipath, 'filter.py')
filter_path = mozpath.join(mozpath.dirname(self.inipath), 'filter.py')
try:
l = {}
execfile(url2pathname(urlparse(filterurl).path), {}, l)
execfile(filter_path, {}, l)
if 'test' in l and callable(l['test']):
filters = [l['test']]
else:
@ -78,14 +575,10 @@ class L10nConfigParser(object):
this behaviour. If you do, make sure to pass a file-like object
to onLoadConfig.
"""
self.onLoadConfig(urlopen(self.inipath))
def onLoadConfig(self, inifile):
"""Parse a file-like object for the loaded l10n.ini file."""
cp = ConfigParser(self.defaults)
cp.readfp(inifile)
cp.read(self.inipath)
depth = self.getDepth(cp)
self.baseurl = urljoin(self.inipath, depth)
self.base = mozpath.join(mozpath.dirname(self.inipath), depth)
# create child loaders for any other l10n.ini files to be included
try:
for title, path in cp.items('includes'):
@ -101,21 +594,11 @@ class L10nConfigParser(object):
self.dirs.extend(cp.get('compare', 'dirs').split())
except (NoOptionError, NoSectionError):
pass
# try getting a top level compare dir, as used for fennec
try:
self.tld = cp.get('compare', 'tld')
# remove tld from comparison dirs
if self.tld in self.dirs:
self.dirs.remove(self.tld)
except (NoOptionError, NoSectionError):
self.tld = None
# try to set "all_path" and "all_url"
try:
self.all_path = cp.get('general', 'all')
self.all_url = urljoin(self.baseurl, self.all_path)
self.all_path = mozpath.join(self.base, cp.get('general', 'all'))
except (NoOptionError, NoSectionError):
self.all_path = None
self.all_url = None
return cp
def addChild(self, title, path, orig_cp):
@ -125,27 +608,14 @@ class L10nConfigParser(object):
path -- indicates the path to the module's l10n.ini file
orig_cp -- the configuration parser of this l10n.ini
"""
cp = L10nConfigParser(urljoin(self.baseurl, path), **self.defaults)
cp = L10nConfigParser(mozpath.join(self.base, path), **self.defaults)
cp.loadConfigs()
self.children.append(cp)
def getTLDPathsTuple(self, basepath):
"""Given the basepath, return the path fragments to be used for
self.tld. For build runs, this is (basepath, self.tld), for
source runs, just (basepath,).
@see overwritten method in SourceTreeConfigParser.
"""
return (basepath, self.tld)
def dirsIter(self):
"""Iterate over all dirs and our base path for this l10n.ini"""
url = urlparse(self.baseurl)
basepath = url2pathname(url.path)
if self.tld is not None:
yield self.tld, self.getTLDPathsTuple(basepath)
for dir in self.dirs:
yield dir, (basepath, dir)
yield dir, (self.base, dir)
def directories(self):
"""Iterate over all dirs and base paths for this l10n.ini as well
@ -159,7 +629,7 @@ class L10nConfigParser(object):
def allLocales(self):
"""Return a list of all the locales of this project"""
return util.parseLocales(urlopen(self.all_url).read())
return util.parseLocales(open(self.all_path).read())
class SourceTreeConfigParser(L10nConfigParser):
@ -168,7 +638,7 @@ class SourceTreeConfigParser(L10nConfigParser):
we do for real builds.
'''
def __init__(self, inipath, basepath, redirects):
def __init__(self, inipath, base, redirects):
'''Add an additional argument, base.
base is used to resolve local paths via branch names.
@ -176,23 +646,8 @@ class SourceTreeConfigParser(L10nConfigParser):
repos to local clones.
'''
L10nConfigParser.__init__(self, inipath)
self.basepath = basepath
self.base = base
self.redirects = redirects
self.tld = None
def getDepth(self, cp):
'''Get the depth for the comparison from the parsed l10n.ini.
Overloaded to get the source depth for fennec and friends.
'''
try:
depth = cp.get('general', 'source-depth')
except:
try:
depth = cp.get('general', 'depth')
except:
depth = '.'
return depth
def addChild(self, title, path, orig_cp):
# check if there's a section with details for this include
@ -204,20 +659,14 @@ class SourceTreeConfigParser(L10nConfigParser):
branch = orig_cp.get(details, 'mozilla')
branch = self.redirects.get(branch, branch)
inipath = orig_cp.get(details, 'l10n.ini')
path = self.basepath + '/' + branch + '/' + inipath
path = mozpath.join(self.base, branch, inipath)
else:
path = urljoin(self.baseurl, path)
cp = SourceTreeConfigParser(path, self.basepath, self.redirects,
path = mozpath.join(self.base, path)
cp = SourceTreeConfigParser(path, self.base, self.redirects,
**self.defaults)
cp.loadConfigs()
self.children.append(cp)
def getTLDPathsTuple(self, basepath):
"""Overwrite L10nConfigParser's getTLDPathsTuple to just return
the basepath.
"""
return (basepath, )
class File(object):
@ -232,11 +681,15 @@ class File(object):
# open with universal line ending support and read
return open(self.fullpath, 'rU').read()
def __hash__(self):
@property
def localpath(self):
f = self.file
if self.module:
f = self.module + '/' + f
return hash(f)
f = mozpath.join(self.module, f)
return f
def __hash__(self):
return hash(self.localpath)
def __str__(self):
return self.fullpath
@ -250,72 +703,14 @@ class File(object):
return cmp(self.file, other.file)
class EnumerateDir(object):
ignore_dirs = ['CVS', '.svn', '.hg', '.git']
def __init__(self, basepath, module='', locale=None, ignore_subdirs=[]):
self.basepath = basepath
self.module = module
self.locale = locale
self.ignore_subdirs = ignore_subdirs
pass
def cloneFile(self, other):
'''
Return a File object that this enumerator would return, if it had it.
'''
return File(os.path.join(self.basepath, other.file), other.file,
self.module, self.locale)
def __iter__(self):
# our local dirs are given as a tuple of path segments, starting off
# with an empty sequence for the basepath.
dirs = [()]
while dirs:
dir = dirs.pop(0)
fulldir = os.path.join(self.basepath, *dir)
try:
entries = os.listdir(fulldir)
except OSError:
# we probably just started off in a non-existing dir, ignore
continue
entries.sort()
for entry in entries:
leaf = os.path.join(fulldir, entry)
if os.path.isdir(leaf):
if entry not in self.ignore_dirs and \
leaf not in [os.path.join(self.basepath, d)
for d in self.ignore_subdirs]:
dirs.append(dir + (entry,))
continue
yield File(leaf, '/'.join(dir + (entry,)),
self.module, self.locale)
class LocalesWrap(object):
def __init__(self, base, module, locales, ignore_subdirs=[]):
self.base = base
self.module = module
self.locales = locales
self.ignore_subdirs = ignore_subdirs
def __iter__(self):
for locale in self.locales:
path = os.path.join(self.base, locale, self.module)
yield (locale, EnumerateDir(path, self.module, locale,
self.ignore_subdirs))
class EnumerateApp(object):
reference = 'en-US'
def __init__(self, inipath, l10nbase, locales=None):
self.setupConfigParser(inipath)
self.modules = defaultdict(dict)
self.l10nbase = os.path.abspath(l10nbase)
self.l10nbase = mozpath.abspath(l10nbase)
self.filters = []
drive, tail = os.path.splitdrive(inipath)
self.addFilters(*self.config.getFilters())
self.locales = locales or self.config.allLocales()
self.locales.sort()
@ -327,58 +722,36 @@ class EnumerateApp(object):
def addFilters(self, *args):
self.filters += args
value_map = {None: None, 'error': 0, 'ignore': 1, 'report': 2}
def asConfig(self):
config = ProjectConfig()
self._config_for_ini(config, self.config)
filters = self.config.getFilters()
if filters:
config.set_filter_py(filters[0])
config.locales += self.locales
return config
def filter(self, l10n_file, entity=None):
'''Go through all added filters, and,
- map "error" -> 0, "ignore" -> 1, "report" -> 2
- if filter.test returns a bool, map that to
False -> "ignore" (1), True -> "error" (0)
- take the max of all reported
'''
rv = 0
for f in reversed(self.filters):
try:
_r = f(l10n_file.module, l10n_file.file, entity)
except:
# XXX error handling
continue
if isinstance(_r, bool):
_r = [1, 0][_r]
else:
# map string return value to int, default to 'error',
# None is None
_r = self.value_map.get(_r, 0)
if _r is not None:
rv = max(rv, _r)
return ['error', 'ignore', 'report'][rv]
def __iter__(self):
'''
Iterate over all modules, return en-US directory enumerator, and an
iterator over all locales in each iteration. Per locale, the locale
code and a directory enumerator will be given.
'''
dirmap = dict(self.config.directories())
mods = dirmap.keys()
mods.sort()
for mod in mods:
if self.reference == 'en-US':
base = os.path.join(*(dirmap[mod] + ('locales', 'en-US')))
else:
base = os.path.join(self.l10nbase, self.reference, mod)
yield (mod, EnumerateDir(base, mod, self.reference),
LocalesWrap(self.l10nbase, mod, self.locales,
[m[len(mod)+1:] for m in mods if m.startswith(mod+'/')]))
def _config_for_ini(self, projectconfig, aConfig):
for k, (basepath, module) in aConfig.dirsIter():
paths = {
'module': module,
'reference': mozpath.normpath('%s/%s/locales/en-US/**' %
(basepath, module)),
'l10n': mozpath.normpath('{l10n_base}/{locale}/%s/**' %
module)
}
if module == 'mobile/android/base':
paths['test'] = ['android-dtd']
projectconfig.add_paths(paths)
projectconfig.add_global_environment(l10n_base=self.l10nbase)
for child in aConfig.children:
self._config_for_ini(projectconfig, child)
class EnumerateSourceTreeApp(EnumerateApp):
'''Subclass EnumerateApp to work on side-by-side checked out
repos, and to pay no attention to how the source would actually
be checked out for building.
It supports applications like Fennec, too, which have
'locales/en-US/...' in their root dir, but claim to be 'mobile'.
'''
def __init__(self, inipath, basepath, l10nbase, redirects,
@ -391,16 +764,3 @@ class EnumerateSourceTreeApp(EnumerateApp):
self.config = SourceTreeConfigParser(inipath, self.basepath,
self.redirects)
self.config.loadConfigs()
def get_base_path(mod, loc):
'statics for path patterns and conversion'
__l10n = 'l10n/%(loc)s/%(mod)s'
__en_US = 'mozilla/%(mod)s/locales/en-US'
if loc == 'en-US':
return __en_US % {'mod': mod}
return __l10n % {'mod': mod, 'loc': loc}
def get_path(mod, loc, leaf):
return get_base_path(mod, loc) + '/' + leaf

View file

@ -0,0 +1,116 @@
import unittest
import os
import tempfile
import shutil
from compare_locales import mozpath
from compare_locales.paths import EnumerateApp, ProjectFiles
MAIL_INI = '''\
[general]
depth = ../..
all = mail/locales/all-locales
[compare]
dirs = mail
[includes]
# non-central apps might want to use %(topsrcdir)s here, or other vars
# RFE: that needs to be supported by compare-locales, too, though
toolkit = mozilla/toolkit/locales/l10n.ini
[include_toolkit]
type = hg
mozilla = mozilla-central
repo = http://hg.mozilla.org/
l10n.ini = toolkit/locales/l10n.ini
'''
MAIL_ALL_LOCALES = '''af
de
fr
'''
MAIL_FILTER_PY = '''
def test(mod, path, entity = None):
if mod == 'toolkit' and path == 'ignored_path':
return 'ignore'
return 'error'
'''
TOOLKIT_INI = '''[general]
depth = ../..
[compare]
dirs = toolkit
'''
class TestApp(unittest.TestCase):
def setUp(self):
self.stage = tempfile.mkdtemp()
mail = mozpath.join(self.stage, 'comm', 'mail', 'locales')
toolkit = mozpath.join(
self.stage, 'comm', 'mozilla', 'toolkit', 'locales')
l10n = mozpath.join(self.stage, 'l10n-central', 'de', 'toolkit')
os.makedirs(mozpath.join(mail, 'en-US'))
os.makedirs(mozpath.join(toolkit, 'en-US'))
os.makedirs(l10n)
with open(mozpath.join(mail, 'l10n.ini'), 'w') as f:
f.write(MAIL_INI)
with open(mozpath.join(mail, 'all-locales'), 'w') as f:
f.write(MAIL_ALL_LOCALES)
with open(mozpath.join(mail, 'filter.py'), 'w') as f:
f.write(MAIL_FILTER_PY)
with open(mozpath.join(toolkit, 'l10n.ini'), 'w') as f:
f.write(TOOLKIT_INI)
with open(mozpath.join(mail, 'en-US', 'mail.ftl'), 'w') as f:
f.write('')
with open(mozpath.join(toolkit, 'en-US', 'platform.ftl'), 'w') as f:
f.write('')
with open(mozpath.join(l10n, 'localized.ftl'), 'w') as f:
f.write('')
def tearDown(self):
shutil.rmtree(self.stage)
def test_app(self):
'Test parsing an App'
app = EnumerateApp(
mozpath.join(self.stage, 'comm', 'mail', 'locales', 'l10n.ini'),
mozpath.join(self.stage, 'l10n-central'))
self.assertListEqual(app.locales, ['af', 'de', 'fr'])
self.assertEqual(len(app.config.children), 1)
projectconfig = app.asConfig()
self.assertListEqual(projectconfig.locales, ['af', 'de', 'fr'])
files = ProjectFiles('de', [projectconfig])
files = list(files)
self.assertEqual(len(files), 3)
l10nfile, reffile, mergefile, test = files[0]
self.assertListEqual(mozpath.split(l10nfile)[-3:],
['de', 'mail', 'mail.ftl'])
self.assertListEqual(mozpath.split(reffile)[-4:],
['mail', 'locales', 'en-US', 'mail.ftl'])
self.assertIsNone(mergefile)
self.assertSetEqual(test, set())
l10nfile, reffile, mergefile, test = files[1]
self.assertListEqual(mozpath.split(l10nfile)[-3:],
['de', 'toolkit', 'localized.ftl'])
self.assertListEqual(
mozpath.split(reffile)[-6:],
['comm', 'mozilla', 'toolkit',
'locales', 'en-US', 'localized.ftl'])
self.assertIsNone(mergefile)
self.assertSetEqual(test, set())
l10nfile, reffile, mergefile, test = files[2]
self.assertListEqual(mozpath.split(l10nfile)[-3:],
['de', 'toolkit', 'platform.ftl'])
self.assertListEqual(
mozpath.split(reffile)[-6:],
['comm', 'mozilla', 'toolkit', 'locales', 'en-US', 'platform.ftl'])
self.assertIsNone(mergefile)
self.assertSetEqual(test, set())

View file

@ -6,7 +6,7 @@
import unittest
from compare_locales.checks import getChecker
from compare_locales.parser import getParser, Parser, Entity
from compare_locales.parser import getParser, Parser, DTDEntity
from compare_locales.paths import File
@ -19,19 +19,15 @@ class BaseHelper(unittest.TestCase):
p.readContents(self.refContent)
self.refList, self.refMap = p.parse()
def _test(self, content, refWarnOrErrors, with_ref_file=False):
def _test(self, content, refWarnOrErrors):
p = getParser(self.file.file)
p.readContents(content)
l10n = [e for e in p]
assert len(l10n) == 1
l10n = l10n[0]
if with_ref_file:
kwargs = {
'reference': self.refList
}
else:
kwargs = {}
checker = getChecker(self.file, **kwargs)
checker = getChecker(self.file)
if checker.needs_reference:
checker.set_reference(self.refList)
ref = self.refList[self.refMap[l10n.key]]
found = tuple(checker.check(ref, l10n))
self.assertEqual(found, refWarnOrErrors)
@ -184,28 +180,24 @@ class TestEntitiesInDTDs(BaseHelper):
'''
def testOK(self):
self._test('''<!ENTITY ent.start "Mit &brandShorterName;">''', tuple(),
with_ref_file=True)
self._test('''<!ENTITY ent.start "Mit &brandShorterName;">''', tuple())
def testMismatch(self):
self._test('''<!ENTITY ent.start "Mit &brandShortName;">''',
(('warning', (0, 0),
'Entity brandShortName referenced, '
'but brandShorterName used in context',
'xmlparse'),),
with_ref_file=True)
'xmlparse'),))
def testAcross(self):
self._test('''<!ENTITY ent.end "Mit &brandShorterName;">''',
tuple(),
with_ref_file=True)
tuple())
def testAcrossWithMismatch(self):
'''If we could tell that ent.start and ent.end are one string,
we should warn. Sadly, we can't, so this goes without warning.'''
self._test('''<!ENTITY ent.end "Mit &brandShortName;">''',
tuple(),
with_ref_file=True)
tuple())
def testUnknownWithRef(self):
self._test('''<!ENTITY ent.start "Mit &foopy;">''',
@ -214,8 +206,7 @@ class TestEntitiesInDTDs(BaseHelper):
'Referencing unknown entity `foopy` '
'(brandShorterName used in context, '
'brandShortName known)',
'xmlparse'),),
with_ref_file=True)
'xmlparse'),))
def testUnknown(self):
self._test('''<!ENTITY ent.end "Mit &foopy;">''',
@ -223,8 +214,7 @@ class TestEntitiesInDTDs(BaseHelper):
(0, 0),
'Referencing unknown entity `foopy`'
' (brandShortName, brandShorterName known)',
'xmlparse'),),
with_ref_file=True)
'xmlparse'),))
class TestAndroid(unittest.TestCase):
@ -240,16 +230,17 @@ class TestAndroid(unittest.TestCase):
def getEntity(self, v):
ctx = Parser.Context(v)
return Entity(ctx, lambda s: s, '', (0, len(v)), (), (), (),
(0, len(v)), ())
return DTDEntity(
ctx, '', (0, len(v)), (), (), (), (0, len(v)), ())
def getDTDEntity(self, v):
v = v.replace('"', '&quot;')
ctx = Parser.Context('<!ENTITY foo "%s">' % v)
return Entity(ctx,
lambda s: s, '',
(0, len(v) + 16), (), (), (9, 12),
(14, len(v) + 14), ())
return DTDEntity(
ctx,
'',
(0, len(v) + 16), (), (), (9, 12),
(14, len(v) + 14), ())
def test_android_dtd(self):
"""Testing the actual android checks. The logic is involved,
@ -257,7 +248,7 @@ class TestAndroid(unittest.TestCase):
"""
f = File("embedding/android/strings.dtd", "strings.dtd",
"embedding/android")
checker = getChecker(f)
checker = getChecker(f, extra_tests=['android-dtd'])
# good string
ref = self.getDTDEntity("plain string")
l10n = self.getDTDEntity("plain localized string")
@ -333,7 +324,7 @@ class TestAndroid(unittest.TestCase):
def test_android_prop(self):
f = File("embedding/android/strings.properties", "strings.properties",
"embedding/android")
checker = getChecker(f)
checker = getChecker(f, extra_tests=['android-dtd'])
# good plain string
ref = self.getEntity("plain string")
l10n = self.getEntity("plain localized string")
@ -381,7 +372,8 @@ class TestAndroid(unittest.TestCase):
p = getParser(f.file)
p.readContents('<!ENTITY other "some &good.ref;">')
ref = p.parse()
checker = getChecker(f, reference=ref[0])
checker = getChecker(f)
checker.set_reference(ref[0])
# good string
ref = self.getDTDEntity("plain string")
l10n = self.getDTDEntity("plain localized string")

View file

@ -4,7 +4,8 @@
import unittest
from compare_locales import compare
from compare_locales import compare, paths
from cPickle import loads, dumps
class TestTree(unittest.TestCase):
@ -37,14 +38,10 @@ class TestTree(unittest.TestCase):
self.assertDictEqual(
tree.toJSON(),
{
'children': [
('one/entry',
{'value': {'leaf': 1}}
),
('two/other',
{'value': {'leaf': 2}}
)
]
'one/entry':
{'leaf': 1},
'two/other':
{'leaf': 2}
}
)
self.assertMultiLineEqual(
@ -74,17 +71,194 @@ two/other
self.assertDictEqual(
tree.toJSON(),
{
'children': [
('one', {
'children': [
('entry',
{'value': {'leaf': 1}}
),
('other',
{'value': {'leaf': 2}}
)
]
})
]
'one': {
'entry':
{'leaf': 1},
'other':
{'leaf': 2}
}
}
)
class TestObserver(unittest.TestCase):
def test_simple(self):
obs = compare.Observer()
f = paths.File('/some/real/sub/path', 'sub/path', locale='de')
obs.notify('missingEntity', f, 'one')
obs.notify('missingEntity', f, 'two')
obs.updateStats(f, {'missing': 15})
self.assertDictEqual(obs.toJSON(), {
'summary': {
'de': {
'missing': 15
}
},
'details': {
'de/sub/path':
[{'missingEntity': 'one'},
{'missingEntity': 'two'}]
}
})
clone = loads(dumps(obs))
self.assertDictEqual(clone.summary, obs.summary)
self.assertDictEqual(clone.details.toJSON(), obs.details.toJSON())
self.assertIsNone(clone.file_stats)
def test_module(self):
obs = compare.Observer(file_stats=True)
f = paths.File('/some/real/sub/path', 'path',
module='sub', locale='de')
obs.notify('missingEntity', f, 'one')
obs.notify('obsoleteEntity', f, 'bar')
obs.notify('missingEntity', f, 'two')
obs.updateStats(f, {'missing': 15})
self.assertDictEqual(obs.toJSON(), {
'summary': {
'de': {
'missing': 15
}
},
'details': {
'de/sub/path':
[
{'missingEntity': 'one'},
{'obsoleteEntity': 'bar'},
{'missingEntity': 'two'},
]
}
})
self.assertDictEqual(obs.file_stats, {
'de': {
'sub/path': {
'missing': 15
}
}
})
self.assertEqual(obs.serialize(), '''\
de/sub/path
+one
-bar
+two
de:
missing: 15
0% of entries changed''')
clone = loads(dumps(obs))
self.assertDictEqual(clone.summary, obs.summary)
self.assertDictEqual(clone.details.toJSON(), obs.details.toJSON())
self.assertDictEqual(clone.file_stats, obs.file_stats)
def test_file_stats(self):
obs = compare.Observer(file_stats=True)
f = paths.File('/some/real/sub/path', 'sub/path', locale='de')
obs.notify('missingEntity', f, 'one')
obs.notify('missingEntity', f, 'two')
obs.updateStats(f, {'missing': 15})
self.assertDictEqual(obs.toJSON(), {
'summary': {
'de': {
'missing': 15
}
},
'details': {
'de/sub/path':
[
{'missingEntity': 'one'},
{'missingEntity': 'two'},
]
}
})
self.assertDictEqual(obs.file_stats, {
'de': {
'sub/path': {
'missing': 15
}
}
})
clone = loads(dumps(obs))
self.assertDictEqual(clone.summary, obs.summary)
self.assertDictEqual(clone.details.toJSON(), obs.details.toJSON())
self.assertDictEqual(clone.file_stats, obs.file_stats)
class TestAddRemove(unittest.TestCase):
def _test(self, left, right, ref_actions):
ar = compare.AddRemove()
ar.set_left(left)
ar.set_right(right)
actions = list(ar)
self.assertListEqual(actions, ref_actions)
def test_equal(self):
self._test(['z', 'a', 'p'], ['z', 'a', 'p'], [
('equal', 'z'),
('equal', 'a'),
('equal', 'p'),
])
def test_add_start(self):
self._test(['a', 'p'], ['z', 'a', 'p'], [
('add', 'z'),
('equal', 'a'),
('equal', 'p'),
])
def test_add_middle(self):
self._test(['z', 'p'], ['z', 'a', 'p'], [
('equal', 'z'),
('add', 'a'),
('equal', 'p'),
])
def test_add_end(self):
self._test(['z', 'a'], ['z', 'a', 'p'], [
('equal', 'z'),
('equal', 'a'),
('add', 'p'),
])
def test_delete_start(self):
self._test(['z', 'a', 'p'], ['a', 'p'], [
('delete', 'z'),
('equal', 'a'),
('equal', 'p'),
])
def test_delete_middle(self):
self._test(['z', 'a', 'p'], ['z', 'p'], [
('equal', 'z'),
('delete', 'a'),
('equal', 'p'),
])
def test_delete_end(self):
self._test(['z', 'a', 'p'], ['z', 'a'], [
('equal', 'z'),
('equal', 'a'),
('delete', 'p'),
])
def test_replace_start(self):
self._test(['b', 'a', 'p'], ['z', 'a', 'p'], [
('add', 'z'),
('delete', 'b'),
('equal', 'a'),
('equal', 'p'),
])
def test_replace_middle(self):
self._test(['z', 'b', 'p'], ['z', 'a', 'p'], [
('equal', 'z'),
('add', 'a'),
('delete', 'b'),
('equal', 'p'),
])
def test_replace_end(self):
self._test(['z', 'a', 'b'], ['z', 'a', 'p'], [
('equal', 'z'),
('equal', 'a'),
('add', 'p'),
('delete', 'b'),
])
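
The tests above pin the protocol down; inside the comparer, AddRemove is fed the reference keys as the left side and the l10n keys as the right side, so, roughly (a sketch with assumed entity lists, not the actual compare code), 'delete' corresponds to a missing entity and 'add' to an obsolete one:

ar = compare.AddRemove()
ar.set_left([e.key for e in reference_entities])  # hypothetical list
ar.set_right([e.key for e in l10n_entities])      # hypothetical list
for action, key in ar:
    if action == 'delete':
        pass  # in the reference only: missing from the l10n file
    elif action == 'add':
        pass  # in the l10n file only: obsolete entity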

View file

@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from compare_locales.tests import ParserTestMixin
mpl2 = '''\
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
'''
class TestDefinesParser(ParserTestMixin, unittest.TestCase):
filename = 'defines.inc'
def testBrowser(self):
self._test(mpl2 + '''#filter emptyLines
#define MOZ_LANGPACK_CREATOR mozilla.org
# If non-English locales wish to credit multiple contributors, uncomment this
# variable definition and use the format specified.
# #define MOZ_LANGPACK_CONTRIBUTORS <em:contributor>Joe Solon</em:contributor>
#unfilter emptyLines
''', (
('Comment', mpl2),
('DefinesInstruction', 'filter emptyLines'),
('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
('Comment', '#define'),
('DefinesInstruction', 'unfilter emptyLines')))
def testBrowserWithContributors(self):
self._test(mpl2 + '''#filter emptyLines
#define MOZ_LANGPACK_CREATOR mozilla.org
# If non-English locales wish to credit multiple contributors, uncomment this
# variable definition and use the format specified.
#define MOZ_LANGPACK_CONTRIBUTORS <em:contributor>Joe Solon</em:contributor>
#unfilter emptyLines
''', (
('Comment', mpl2),
('DefinesInstruction', 'filter emptyLines'),
('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
('Comment', 'non-English'),
('MOZ_LANGPACK_CONTRIBUTORS',
'<em:contributor>Joe Solon</em:contributor>'),
('DefinesInstruction', 'unfilter emptyLines')))
def testCommentWithNonAsciiCharacters(self):
self._test(mpl2 + '''#filter emptyLines
# e.g. #define seamonkey_l10n <DT><A HREF="urn:foo">SeaMonkey v češtině</a>
#define seamonkey_l10n_long
#unfilter emptyLines
''', (
('Comment', mpl2),
('DefinesInstruction', 'filter emptyLines'),
('Comment', u'češtině'),
('seamonkey_l10n_long', ''),
('DefinesInstruction', 'unfilter emptyLines')))
def testToolkit(self):
self._test('''#define MOZ_LANG_TITLE English (US)
''', (
('MOZ_LANG_TITLE', 'English (US)'),))
def testToolkitEmpty(self):
self._test('', tuple())
def test_empty_file(self):
'''Test that empty files generate errors.
defines.inc files are interesting that way, as their
content is added to the generated file.
'''
self._test('\n', (('Junk', '\n'),))
self._test('\n\n', (('Junk', '\n\n'),))
self._test(' \n\n', (('Junk', ' \n\n'),))
if __name__ == '__main__':
unittest.main()

View file

@ -132,6 +132,19 @@ escaped value">
a, b = list(self.parser)
self.assertEqual(a.post, ' ')
def test_word_count(self):
self.parser.readContents('''\
<!ENTITY a "one">
<!ENTITY b "one<br>two">
<!ENTITY c "one<span>word</span>">
<!ENTITY d "one <a href='foo'>two</a> three">
''')
a, b, c, d = list(self.parser)
self.assertEqual(a.count_words(), 1)
self.assertEqual(b.count_words(), 2)
self.assertEqual(c.count_words(), 1)
self.assertEqual(d.count_words(), 3)
if __name__ == '__main__':
unittest.main()

View file

@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from compare_locales.tests import ParserTestMixin
class TestFluentParser(ParserTestMixin, unittest.TestCase):
maxDiff = None
filename = 'foo.ftl'
def test_equality_same(self):
source = 'progress = Progress: { NUMBER($num, style: "percent") }.'
self.parser.readContents(source)
[ent1] = list(self.parser)
self.parser.readContents(source)
[ent2] = list(self.parser)
self.assertTrue(ent1.equals(ent2))
def test_equality_different_whitespace(self):
source1 = 'foo = { $arg }'
source2 = 'foo = {    $arg    }'
self.parser.readContents(source1)
[ent1] = list(self.parser)
self.parser.readContents(source2)
[ent2] = list(self.parser)
self.assertTrue(ent1.equals(ent2))
def test_word_count(self):
self.parser.readContents('''\
a = One
b = One two three
c = One { $arg } two
d =
One { $arg ->
*[x] Two three
[y] Four
} five.
e
.attr = One
f
.attr1 = One
.attr2 = Two
g = One two
.attr = Three
h =
One { $arg ->
*[x] Two three
[y] Four
} five.
.attr1 =
Six { $arg ->
*[x] Seven eight
[y] Nine
} ten.
''')
a, b, c, d, e, f, g, h = list(self.parser)
self.assertEqual(a.count_words(), 1)
self.assertEqual(b.count_words(), 3)
self.assertEqual(c.count_words(), 2)
self.assertEqual(d.count_words(), 5)
self.assertEqual(e.count_words(), 1)
self.assertEqual(f.count_words(), 2)
self.assertEqual(g.count_words(), 3)
self.assertEqual(h.count_words(), 10)
def test_simple_message(self):
self.parser.readContents('a = A')
[a] = list(self.parser)
self.assertEqual(a.key, 'a')
self.assertEqual(a.val, 'A')
self.assertEqual(a.all, 'a = A')
attributes = list(a.attributes)
self.assertEqual(len(attributes), 0)
def test_complex_message(self):
self.parser.readContents('abc = A { $arg } B { msg } C')
[abc] = list(self.parser)
self.assertEqual(abc.key, 'abc')
self.assertEqual(abc.val, 'A { $arg } B { msg } C')
self.assertEqual(abc.all, 'abc = A { $arg } B { msg } C')
def test_multiline_message(self):
self.parser.readContents('''\
abc =
A
B
C
''')
[abc] = list(self.parser)
self.assertEqual(abc.key, 'abc')
self.assertEqual(abc.val, '\n A\n B\n C')
self.assertEqual(abc.all, 'abc =\n A\n B\n C')
def test_message_with_attribute(self):
self.parser.readContents('''\
abc = ABC
.attr = Attr
''')
[abc] = list(self.parser)
self.assertEqual(abc.key, 'abc')
self.assertEqual(abc.val, 'ABC')
self.assertEqual(abc.all, 'abc = ABC\n .attr = Attr')
def test_message_with_attribute_and_no_value(self):
self.parser.readContents('''\
abc
.attr = Attr
''')
[abc] = list(self.parser)
self.assertEqual(abc.key, 'abc')
self.assertEqual(abc.val, '')
self.assertEqual(abc.all, 'abc\n .attr = Attr')
attributes = list(abc.attributes)
self.assertEqual(len(attributes), 1)
attr = attributes[0]
self.assertEqual(attr.key, 'attr')
self.assertEqual(attr.val, 'Attr')

View file

@ -135,5 +135,6 @@ Good=other string
self._test('\n\n', (('Whitespace', '\n\n'),))
self._test(' \n\n', (('Whitespace', ' \n\n'),))
if __name__ == '__main__':
unittest.main()

View file

@ -9,18 +9,19 @@ import shutil
from compare_locales.parser import getParser
from compare_locales.paths import File
from compare_locales.compare import ContentComparer
from compare_locales.compare import ContentComparer, Observer
from compare_locales import mozpath
class ContentMixin(object):
extension = None # OVERLOAD
def reference(self, content):
self.ref = os.path.join(self.tmp, "en-reference" + self.extension)
self.ref = mozpath.join(self.tmp, "en-reference" + self.extension)
open(self.ref, "w").write(content)
def localized(self, content):
self.l10n = os.path.join(self.tmp, "l10n" + self.extension)
self.l10n = mozpath.join(self.tmp, "l10n" + self.extension)
open(self.l10n, "w").write(content)
@ -30,7 +31,7 @@ class TestProperties(unittest.TestCase, ContentMixin):
def setUp(self):
self.maxDiff = None
self.tmp = mkdtemp()
os.mkdir(os.path.join(self.tmp, "merge"))
os.mkdir(mozpath.join(self.tmp, "merge"))
def tearDown(self):
shutil.rmtree(self.tmp)
@ -38,27 +39,28 @@ class TestProperties(unittest.TestCase, ContentMixin):
def testGood(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
bar = barVal
self.reference("""foo = fooVal word
bar = barVal word
eff = effVal""")
self.localized("""foo = lFoo
bar = lBar
eff = lEff
eff = lEff word
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.properties", ""),
File(self.l10n, "l10n.properties", ""))
File(self.l10n, "l10n.properties", ""),
mozpath.join(self.tmp, "merge", "l10n.properties"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 3
'changed': 3,
'changed_w': 5
}},
'details': {}
}
)
self.assert_(not os.path.exists(os.path.join(cc.merge_stage,
self.assert_(not os.path.exists(mozpath.join(self.tmp, "merge",
'l10n.properties')))
def testMissing(self):
@ -68,30 +70,31 @@ bar = barVal
eff = effVal""")
self.localized("""bar = lBar
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.properties", ""),
File(self.l10n, "l10n.properties", ""))
File(self.l10n, "l10n.properties", ""),
mozpath.join(self.tmp, "merge", "l10n.properties"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 1, 'missing': 2
'changed': 1,
'changed_w': 1,
'missing': 2,
'missing_w': 2
}},
'details': {
'children': [
('l10n.properties',
{'value': {'missingEntity': [u'eff', u'foo']}}
)
]}
}
)
mergefile = os.path.join(self.tmp, "merge", "l10n.properties")
'l10n.properties': [
{'missingEntity': u'foo'},
{'missingEntity': u'eff'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.properties")
self.assertTrue(os.path.isfile(mergefile))
p = getParser(mergefile)
p.readFile(mergefile)
[m, n] = p.parse()
self.assertEqual(map(lambda e: e.key, m), ["bar", "eff", "foo"])
self.assertEqual(map(lambda e: e.key, m), ["bar", "foo", "eff"])
def testError(self):
self.assertTrue(os.path.isdir(self.tmp))
@ -102,28 +105,28 @@ eff = effVal""")
bar = %S lBar
eff = leffVal
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.properties", ""),
File(self.l10n, "l10n.properties", ""))
File(self.l10n, "l10n.properties", ""),
mozpath.join(self.tmp, "merge", "l10n.properties"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 2, 'errors': 1, 'missing': 1
'changed': 2,
'changed_w': 3,
'errors': 1,
'missing': 1,
'missing_w': 1
}},
'details': {
'children': [
('l10n.properties',
{'value': {
'error': [u'argument 1 `S` should be `d` '
u'at line 1, column 7 for bar'],
'missingEntity': [u'foo']}}
)
]}
}
)
mergefile = os.path.join(self.tmp, "merge", "l10n.properties")
'l10n.properties': [
{'missingEntity': u'foo'},
{'error': u'argument 1 `S` should be `d` '
u'at line 1, column 7 for bar'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.properties")
self.assertTrue(os.path.isfile(mergefile))
p = getParser(mergefile)
p.readFile(mergefile)
@ -139,22 +142,58 @@ eff = effVal""")
other = obsolete
eff = leffVal
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.properties", ""),
File(self.l10n, "l10n.properties", ""))
File(self.l10n, "l10n.properties", ""),
mozpath.join(self.tmp, "merge", "l10n.properties"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 1, 'obsolete': 1, 'unchanged': 1
'changed': 1,
'changed_w': 1,
'obsolete': 1,
'unchanged': 1,
'unchanged_w': 1
}},
'details': {
'children': [
('l10n.properties',
{'value': {'obsoleteEntity': [u'other']}})]},
}
)
'l10n.properties': [
{'obsoleteEntity': u'other'}]
}
})
def test_duplicate(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
bar = barVal
eff = effVal
foo = other val for foo""")
self.localized("""foo = localized
bar = lBar
eff = localized eff
bar = duplicated bar
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.properties", ""),
File(self.l10n, "l10n.properties", ""),
mozpath.join(self.tmp, "merge", "l10n.properties"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'errors': 1,
'warnings': 1,
'changed': 3,
'changed_w': 6
}},
'details': {
'l10n.properties': [
{'warning': u'foo occurs 2 times'},
{'error': u'bar occurs 2 times'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.properties")
self.assertFalse(os.path.isfile(mergefile))
class TestDTD(unittest.TestCase, ContentMixin):
@ -163,7 +202,7 @@ class TestDTD(unittest.TestCase, ContentMixin):
def setUp(self):
self.maxDiff = None
self.tmp = mkdtemp()
os.mkdir(os.path.join(self.tmp, "merge"))
os.mkdir(mozpath.join(self.tmp, "merge"))
def tearDown(self):
shutil.rmtree(self.tmp)
@ -178,21 +217,22 @@ class TestDTD(unittest.TestCase, ContentMixin):
<!ENTITY bar 'lBar'>
<!ENTITY eff 'lEff'>
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.dtd", ""),
File(self.l10n, "l10n.dtd", ""))
File(self.l10n, "l10n.dtd", ""),
mozpath.join(self.tmp, "merge", "l10n.dtd"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 3
'changed': 3,
'changed_w': 3
}},
'details': {}
}
)
self.assert_(
not os.path.exists(os.path.join(cc.merge_stage, 'l10n.dtd')))
not os.path.exists(mozpath.join(self.tmp, "merge", 'l10n.dtd')))
def testMissing(self):
self.assertTrue(os.path.isdir(self.tmp))
@ -201,30 +241,31 @@ class TestDTD(unittest.TestCase, ContentMixin):
<!ENTITY eff 'effVal'>""")
self.localized("""<!ENTITY bar 'lBar'>
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.dtd", ""),
File(self.l10n, "l10n.dtd", ""))
File(self.l10n, "l10n.dtd", ""),
mozpath.join(self.tmp, "merge", "l10n.dtd"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 1, 'missing': 2
'changed': 1,
'changed_w': 1,
'missing': 2,
'missing_w': 2
}},
'details': {
'children': [
('l10n.dtd',
{'value': {'missingEntity': [u'eff', u'foo']}}
)
]}
}
)
mergefile = os.path.join(self.tmp, "merge", "l10n.dtd")
'l10n.dtd': [
{'missingEntity': u'foo'},
{'missingEntity': u'eff'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.dtd")
self.assertTrue(os.path.isfile(mergefile))
p = getParser(mergefile)
p.readFile(mergefile)
[m, n] = p.parse()
self.assertEqual(map(lambda e: e.key, m), ["bar", "eff", "foo"])
self.assertEqual(map(lambda e: e.key, m), ["bar", "foo", "eff"])
def testJunk(self):
self.assertTrue(os.path.isdir(self.tmp))
@ -235,35 +276,567 @@ class TestDTD(unittest.TestCase, ContentMixin):
<!ENTY bar 'gimmick'>
<!ENTITY eff 'effVal'>
""")
cc = ContentComparer()
cc.set_merge_stage(os.path.join(self.tmp, "merge"))
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.dtd", ""),
File(self.l10n, "l10n.dtd", ""))
File(self.l10n, "l10n.dtd", ""),
mozpath.join(self.tmp, "merge", "l10n.dtd"))
self.assertDictEqual(
cc.observer.toJSON(),
cc.observers[0].toJSON(),
{'summary':
{None: {
'errors': 1, 'missing': 1, 'unchanged': 2
'errors': 1,
'missing': 1,
'missing_w': 1,
'unchanged': 2,
'unchanged_w': 2
}},
'details': {
'children': [
('l10n.dtd',
{'value': {
'error': [u'Unparsed content "<!ENTY bar '
u'\'gimmick\'>" '
u'from line 2 colum 1 to '
u'line 2 column 22'],
'missingEntity': [u'bar']}}
)
]}
}
)
mergefile = os.path.join(self.tmp, "merge", "l10n.dtd")
'l10n.dtd': [
{'error': u'Unparsed content "<!ENTY bar '
u'\'gimmick\'>" '
u'from line 2 column 1 to '
u'line 2 column 22'},
{'missingEntity': u'bar'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.dtd")
self.assertTrue(os.path.isfile(mergefile))
p = getParser(mergefile)
p.readFile(mergefile)
[m, n] = p.parse()
self.assertEqual(map(lambda e: e.key, m), ["foo", "eff", "bar"])
def test_reference_junk(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""<!ENTITY foo 'fooVal'>
<!ENT bar 'bad val'>
<!ENTITY eff 'effVal'>""")
self.localized("""<!ENTITY foo 'fooVal'>
<!ENTITY eff 'effVal'>
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.dtd", ""),
File(self.l10n, "l10n.dtd", ""),
mozpath.join(self.tmp, "merge", "l10n.dtd"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'warnings': 1,
'unchanged': 2,
'unchanged_w': 2
}},
'details': {
'l10n.dtd': [
{'warning': 'Parser error in en-US'}]
}
})
def test_reference_xml_error(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""<!ENTITY foo 'fooVal'>
<!ENTITY bar 'bad &val'>
<!ENTITY eff 'effVal'>""")
self.localized("""<!ENTITY foo 'fooVal'>
<!ENTITY bar 'good val'>
<!ENTITY eff 'effVal'>
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.dtd", ""),
File(self.l10n, "l10n.dtd", ""),
mozpath.join(self.tmp, "merge", "l10n.dtd"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'warnings': 1,
'unchanged': 2,
'unchanged_w': 2,
'changed': 1,
'changed_w': 2
}},
'details': {
'l10n.dtd': [
{'warning': u"can't parse en-US value at line 1, "
u"column 0 for bar"}]
}
})
class TestFluent(unittest.TestCase):
maxDiff = None # we got big dictionaries to compare
def reference(self, content):
self.ref = os.path.join(self.tmp, "en-reference.ftl")
open(self.ref, "w").write(content)
def localized(self, content):
self.l10n = os.path.join(self.tmp, "l10n.ftl")
open(self.l10n, "w").write(content)
def setUp(self):
self.tmp = mkdtemp()
os.mkdir(os.path.join(self.tmp, "merge"))
self.ref = self.l10n = None
def tearDown(self):
shutil.rmtree(self.tmp)
del self.tmp
del self.ref
del self.l10n
def testGood(self):
self.reference("""\
foo = fooVal
bar = barVal
eff = effVal
""")
self.localized("""\
foo = lFoo
bar = lBar
eff = lEff
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'changed': 3,
'changed_w': 3
}},
'details': {}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(not os.path.exists(mergepath))
def testMissing(self):
self.reference("""\
foo = fooVal
bar = barVal
eff = effVal
""")
self.localized("""\
foo = lFoo
eff = lEff
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {
'l10n.ftl': [
{'missingEntity': u'bar'}
],
},
'summary': {
None: {
'changed': 2,
'changed_w': 2,
'missing': 1,
'missing_w': 1
}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(not os.path.exists(mergepath))
def testBroken(self):
self.reference("""\
foo = fooVal
bar = barVal
eff = effVal
""")
self.localized("""\
-- Invalid Comment
foo = lFoo
bar lBar
eff = lEff {
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {
'l10n.ftl': [
{'error': u'Unparsed content "-- Invalid Comment" '
u'from line 1 column 1 '
u'to line 1 column 19'},
{'error': u'Unparsed content "bar lBar" '
u'from line 3 column 1 '
u'to line 3 column 9'},
{'error': u'Unparsed content "eff = lEff {" '
u'from line 4 column 1 '
u'to line 4 column 13'},
{'missingEntity': u'bar'},
{'missingEntity': u'eff'},
],
},
'summary': {
None: {
'changed': 1,
'changed_w': 1,
'missing': 2,
'missing_w': 2,
'errors': 3
}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(os.path.exists(mergepath))
p = getParser(mergepath)
p.readFile(mergepath)
merged_entities, merged_map = p.parse()
self.assertEqual([e.key for e in merged_entities], ["foo"])
merged_foo = merged_entities[merged_map['foo']]
# foo should be l10n
p.readFile(self.l10n)
l10n_entities, l10n_map = p.parse()
l10n_foo = l10n_entities[l10n_map['foo']]
self.assertTrue(merged_foo.equals(l10n_foo))
def testMismatchingAttributes(self):
self.reference("""
foo = Foo
bar = Bar
.tender = Attribute value
eff = Eff
""")
self.localized("""\
foo = lFoo
.obsolete = attr
bar = lBar
eff = lEff
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {
'l10n.ftl': [
{
'error':
u'Obsolete attribute: '
'obsolete at line 2, column 3 for foo'
},
{
'error':
u'Missing attribute: tender at line 3,'
' column 1 for bar',
},
],
},
'summary': {
None: {'changed': 3, 'changed_w': 5, 'errors': 2}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(os.path.exists(mergepath))
p = getParser(mergepath)
p.readFile(mergepath)
merged_entities, merged_map = p.parse()
self.assertEqual([e.key for e in merged_entities], ["eff"])
merged_eff = merged_entities[merged_map['eff']]
# eff should be l10n
p.readFile(self.l10n)
l10n_entities, l10n_map = p.parse()
l10n_eff = l10n_entities[l10n_map['eff']]
self.assertTrue(merged_eff.equals(l10n_eff))
def testMismatchingValues(self):
self.reference("""
foo = Foo
.foottr = something
bar
.tender = Attribute value
""")
self.localized("""\
foo
.foottr = attr
bar = lBar
.tender = localized
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {
'l10n.ftl': [
{
'error':
u'Missing value at line 1, column 1 for foo'
},
{
'error':
u'Obsolete value at line 3, column 7 for bar',
},
]
},
'summary': {
None: {'changed': 2, 'changed_w': 4, 'errors': 2}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(os.path.exists(mergepath))
p = getParser(mergepath)
p.readFile(mergepath)
merged_entities, _ = p.parse()
self.assertEqual([e.key for e in merged_entities], [])
def testMissingSection(self):
self.reference("""\
foo = fooVal
[[ Section ]]
bar = barVal
""")
self.localized("""\
foo = lFoo
bar = lBar
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {},
'summary': {
None: {
'changed': 2,
'changed_w': 2,
}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(not os.path.exists(mergepath))
def testMissingAttachedComment(self):
self.reference("""\
foo = fooVal
// Attached Comment
bar = barVal
""")
self.localized("""\
foo = lFoo
bar = barVal
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {},
'summary': {
None: {
'changed': 1,
'changed_w': 1,
'unchanged': 1,
'unchanged_w': 1,
}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(not os.path.exists(mergepath))
def testObsoleteStandaloneComment(self):
self.reference("""\
foo = fooVal
bar = barVal
""")
self.localized("""\
foo = lFoo
// Standalone Comment
bar = lBar
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{
'details': {},
'summary': {
None: {
'changed': 2,
'changed_w': 2,
}
}
}
)
# validate merge results
mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assert_(not os.path.exists(mergepath))
def test_duplicate(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
bar = barVal
eff = effVal
foo = other val for foo""")
self.localized("""foo = localized
bar = lBar
eff = localized eff
bar = duplicated bar
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'errors': 1,
'warnings': 1,
'changed': 3,
'changed_w': 6
}},
'details': {
'l10n.ftl': [
{'warning': u'foo occurs 2 times'},
{'error': u'bar occurs 2 times'}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assertFalse(os.path.isfile(mergefile))
def test_duplicate_attributes(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
.attr = good""")
self.localized("""foo = localized
.attr = not
.attr = so
.attr = good
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'warnings': 1,
'changed': 1,
'changed_w': 2
}},
'details': {
'l10n.ftl': [
{'warning':
u'Attribute "attr" occurs 3 times '
u'at line 4, column 5 for foo'
}]
}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assertFalse(os.path.isfile(mergefile))
def test_unmatched_tags(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
#yes
""")
self.localized("""foo = fooVal
#no
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'unchanged': 1,
'unchanged_w': 1
}},
'details': {}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assertFalse(os.path.isfile(mergefile))
def test_matching_tags(self):
self.assertTrue(os.path.isdir(self.tmp))
self.reference("""foo = fooVal
#yes
""")
self.localized("""foo = fooVal
#yes
""")
cc = ContentComparer([Observer()])
cc.compare(File(self.ref, "en-reference.ftl", ""),
File(self.l10n, "l10n.ftl", ""),
mozpath.join(self.tmp, "merge", "l10n.ftl"))
self.assertDictEqual(
cc.observers[0].toJSON(),
{'summary':
{None: {
'unchanged': 1,
'unchanged_w': 1
}},
'details': {}
})
mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
self.assertFalse(os.path.isfile(mergefile))
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,138 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from compare_locales.mozpath import (
relpath,
join,
normpath,
dirname,
commonprefix,
basename,
split,
splitext,
basedir,
match,
rebase,
)
import unittest
import os
class TestPath(unittest.TestCase):
SEP = os.sep
def test_relpath(self):
self.assertEqual(relpath('foo', 'foo'), '')
self.assertEqual(relpath(self.SEP.join(('foo', 'bar')), 'foo/bar'), '')
self.assertEqual(relpath(self.SEP.join(('foo', 'bar')), 'foo'), 'bar')
self.assertEqual(relpath(self.SEP.join(('foo', 'bar', 'baz')), 'foo'),
'bar/baz')
self.assertEqual(relpath(self.SEP.join(('foo', 'bar')), 'foo/bar/baz'),
'..')
self.assertEqual(relpath(self.SEP.join(('foo', 'bar')), 'foo/baz'),
'../bar')
self.assertEqual(relpath('foo/', 'foo'), '')
self.assertEqual(relpath('foo/bar/', 'foo'), 'bar')
def test_join(self):
self.assertEqual(join('foo', 'bar', 'baz'), 'foo/bar/baz')
self.assertEqual(join('foo', '', 'bar'), 'foo/bar')
self.assertEqual(join('', 'foo', 'bar'), 'foo/bar')
self.assertEqual(join('', 'foo', '/bar'), '/bar')
def test_normpath(self):
self.assertEqual(normpath(self.SEP.join(('foo', 'bar', 'baz',
'..', 'qux'))), 'foo/bar/qux')
def test_dirname(self):
self.assertEqual(dirname('foo/bar/baz'), 'foo/bar')
self.assertEqual(dirname('foo/bar'), 'foo')
self.assertEqual(dirname('foo'), '')
self.assertEqual(dirname('foo/bar/'), 'foo/bar')
def test_commonprefix(self):
self.assertEqual(commonprefix([self.SEP.join(('foo', 'bar', 'baz')),
'foo/qux', 'foo/baz/qux']), 'foo/')
self.assertEqual(commonprefix([self.SEP.join(('foo', 'bar', 'baz')),
'foo/qux', 'baz/qux']), '')
def test_basename(self):
self.assertEqual(basename('foo/bar/baz'), 'baz')
self.assertEqual(basename('foo/bar'), 'bar')
self.assertEqual(basename('foo'), 'foo')
self.assertEqual(basename('foo/bar/'), '')
def test_split(self):
self.assertEqual(split(self.SEP.join(('foo', 'bar', 'baz'))),
['foo', 'bar', 'baz'])
def test_splitext(self):
self.assertEqual(splitext(self.SEP.join(('foo', 'bar', 'baz.qux'))),
('foo/bar/baz', '.qux'))
def test_basedir(self):
foobarbaz = self.SEP.join(('foo', 'bar', 'baz'))
self.assertEqual(basedir(foobarbaz, ['foo', 'bar', 'baz']), 'foo')
self.assertEqual(basedir(foobarbaz, ['foo', 'foo/bar', 'baz']),
'foo/bar')
self.assertEqual(basedir(foobarbaz, ['foo/bar', 'foo', 'baz']),
'foo/bar')
self.assertEqual(basedir(foobarbaz, ['foo', 'bar', '']), 'foo')
self.assertEqual(basedir(foobarbaz, ['bar', 'baz', '']), '')
def test_match(self):
self.assertTrue(match('foo', ''))
self.assertTrue(match('foo/bar/baz.qux', 'foo/bar'))
self.assertTrue(match('foo/bar/baz.qux', 'foo'))
self.assertTrue(match('foo', '*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/bar/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/bar/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/bar/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/bar/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/*/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', '*/bar/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', '*/*/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', '*/*/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/*/*'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/*/*.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/b*/*z.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/b*r/ba*z.qux'))
self.assertFalse(match('foo/bar/baz.qux', 'foo/b*z/ba*r.qux'))
self.assertTrue(match('foo/bar/baz.qux', '**'))
self.assertTrue(match('foo/bar/baz.qux', '**/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', '**/bar/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/*.qux'))
self.assertTrue(match('foo/bar/baz.qux', '**/foo/bar/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/bar/baz.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/bar/*.qux'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/*.qux'))
self.assertTrue(match('foo/bar/baz.qux', '**/*.qux'))
self.assertFalse(match('foo/bar/baz.qux', '**.qux'))
self.assertFalse(match('foo/bar', 'foo/*/bar'))
self.assertTrue(match('foo/bar/baz.qux', 'foo/**/bar/**'))
self.assertFalse(match('foo/nobar/baz.qux', 'foo/**/bar/**'))
self.assertTrue(match('foo/bar', 'foo/**/bar/**'))
def test_rebase(self):
self.assertEqual(rebase('foo', 'foo/bar', 'bar/baz'), 'baz')
self.assertEqual(rebase('foo', 'foo', 'bar/baz'), 'bar/baz')
self.assertEqual(rebase('foo/bar', 'foo', 'baz'), 'bar/baz')
if os.altsep:
class TestAltPath(TestPath):
SEP = os.altsep
class TestReverseAltPath(TestPath):
def setUp(self):
sep = os.sep
os.sep = os.altsep
os.altsep = sep
def tearDown(self):
self.setUp()
class TestAltReverseAltPath(TestReverseAltPath):
SEP = os.altsep


@ -0,0 +1,44 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from compare_locales import parser
class TestParserContext(unittest.TestCase):
def test_lines(self):
"Test that Parser.Context.lines returns 1-based tuples"
ctx = parser.Parser.Context('''first line
second line
third line
''')
self.assertEqual(
ctx.lines(0, 1),
[(1, 1), (1, 2)]
)
self.assertEqual(
ctx.lines(len('first line')),
[(1, len('first line') + 1)]
)
self.assertEqual(
ctx.lines(len('first line') + 1),
[(2, 1)]
)
self.assertEqual(
ctx.lines(len(ctx.contents)),
[(4, 1)]
)
def test_empty_parser(self):
p = parser.Parser()
entities, _map = p.parse()
self.assertListEqual(
entities,
[]
)
self.assertDictEqual(
_map,
{}
)


@ -0,0 +1,473 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from compare_locales.paths import ProjectConfig, File, ProjectFiles, Matcher
from compare_locales import mozpath
class TestMatcher(unittest.TestCase):
def test_matcher(self):
one = Matcher('foo/*')
self.assertTrue(one.match('foo/baz'))
self.assertFalse(one.match('foo/baz/qux'))
other = Matcher('bar/*')
self.assertTrue(other.match('bar/baz'))
self.assertFalse(other.match('bar/baz/qux'))
self.assertEqual(one.sub(other, 'foo/baz'), 'bar/baz')
self.assertIsNone(one.sub(other, 'bar/baz'))
one = Matcher('foo/**')
self.assertTrue(one.match('foo/baz'))
self.assertTrue(one.match('foo/baz/qux'))
other = Matcher('bar/**')
self.assertTrue(other.match('bar/baz'))
self.assertTrue(other.match('bar/baz/qux'))
self.assertEqual(one.sub(other, 'foo/baz'), 'bar/baz')
self.assertEqual(one.sub(other, 'foo/baz/qux'), 'bar/baz/qux')
one = Matcher('foo/*/one/**')
self.assertTrue(one.match('foo/baz/one/qux'))
self.assertFalse(one.match('foo/baz/bez/one/qux'))
other = Matcher('bar/*/other/**')
self.assertTrue(other.match('bar/baz/other/qux'))
self.assertFalse(other.match('bar/baz/bez/other/qux'))
self.assertEqual(one.sub(other, 'foo/baz/one/qux'),
'bar/baz/other/qux')
self.assertEqual(one.sub(other, 'foo/baz/one/qux/zzz'),
'bar/baz/other/qux/zzz')
self.assertIsNone(one.sub(other, 'foo/baz/bez/one/qux'))
class SetupMixin(object):
def setUp(self):
self.cfg = ProjectConfig()
self.file = File(
'/tmp/somedir/de/browser/one/two/file.ftl',
'file.ftl',
module='browser', locale='de')
self.other_file = File(
'/tmp/somedir/de/toolkit/two/one/file.ftl',
'file.ftl',
module='toolkit', locale='de')
class TestConfigLegacy(SetupMixin, unittest.TestCase):
def test_filter_py_true(self):
'Test a filter.py that just returns bool(True)'
def filter(mod, path, entity=None):
return True
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'error')
def test_filter_py_false(self):
'Test a filter.py that just returns bool(False)'
def filter(mod, path, entity=None):
return False
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'ignore')
def test_filter_py_error(self):
'Test a filter.py that just returns str("error")'
def filter(mod, path, entity=None):
return 'error'
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'error')
def test_filter_py_ignore(self):
'Test a filter.py that just returns str("ignore")'
def filter(mod, path, entity=None):
return 'ignore'
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'ignore')
def test_filter_py_report(self):
'Test a filter.py that just returns str("report"), which maps to "warning"'
def filter(mod, path, entity=None):
return 'report'
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'warning')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'warning')
def test_filter_py_module(self):
'Test a filter.py that returns str("error") for browser and "ignore" otherwise'
def filter(mod, path, entity=None):
return 'error' if mod == 'browser' else 'ignore'
self.cfg.set_filter_py(filter)
with self.assertRaises(AssertionError):
self.cfg.add_rules({})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file, entity='one_entity')
self.assertEqual(rv, 'ignore')
class TestConfigRules(SetupMixin, unittest.TestCase):
def test_filter_empty(self):
'Test that an empty config works'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/browser/**'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, entity='one_entity')
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file, entity='one_entity')
self.assertEqual(rv, 'ignore')
def test_single_file_rule(self):
'Test a single rule for just a single file, no key'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/browser/**'
})
self.cfg.add_rules({
'path': '/tmp/somedir/{locale}/browser/one/two/file.ftl',
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.file, 'one_entity')
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file, 'one_entity')
self.assertEqual(rv, 'ignore')
def test_single_key_rule(self):
'Test a single rule with file and key'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/browser/**'
})
self.cfg.add_rules({
'path': '/tmp/somedir/{locale}/browser/one/two/file.ftl',
'key': 'one_entity',
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, 'one_entity')
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file, 'one_entity')
self.assertEqual(rv, 'ignore')
def test_single_non_matching_key_rule(self):
'Test a single key rule with regex special chars that should not match'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/**'
})
self.cfg.add_rules({
'path': '/tmp/somedir/{locale}/browser/one/two/file.ftl',
'key': '.ne_entit.',
'action': 'ignore'
})
rv = self.cfg.filter(self.file, 'one_entity')
self.assertEqual(rv, 'error')
def test_single_matching_re_key_rule(self):
'Test a single key with regular expression'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/**'
})
self.cfg.add_rules({
'path': '/tmp/somedir/{locale}/browser/one/two/file.ftl',
'key': 're:.ne_entit.$',
'action': 'ignore'
})
rv = self.cfg.filter(self.file, 'one_entity')
self.assertEqual(rv, 'ignore')
def test_double_file_rule(self):
'Test path shortcut, one for each of our files'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/**'
})
self.cfg.add_rules({
'path': [
'/tmp/somedir/{locale}/browser/one/two/file.ftl',
'/tmp/somedir/{locale}/toolkit/two/one/file.ftl',
],
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
def test_double_file_key_rule(self):
'Test path and key shortcut, one key matching, one not'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/**'
})
self.cfg.add_rules({
'path': [
'/tmp/somedir/{locale}/browser/one/two/file.ftl',
'/tmp/somedir/{locale}/toolkit/two/one/file.ftl',
],
'key': [
'one_entity',
'other_entity',
],
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.file, 'one_entity')
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'error')
rv = self.cfg.filter(self.other_file, 'one_entity')
self.assertEqual(rv, 'ignore')
def test_single_wildcard_rule(self):
'Test single wildcard'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/browser/**'
})
self.cfg.add_rules({
'path': [
'/tmp/somedir/{locale}/browser/one/*/*',
],
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
def test_double_wildcard_rule(self):
'Test double wildcard'
self.cfg.add_paths({
'l10n': '/tmp/somedir/{locale}/**'
})
self.cfg.add_rules({
'path': [
'/tmp/somedir/{locale}/**',
],
'action': 'ignore'
})
rv = self.cfg.filter(self.file)
self.assertEqual(rv, 'ignore')
rv = self.cfg.filter(self.other_file)
self.assertEqual(rv, 'ignore')
class MockProjectFiles(ProjectFiles):
def __init__(self, mocks, locale, projects, mergebase=None):
(super(MockProjectFiles, self)
.__init__(locale, projects, mergebase=mergebase))
self.mocks = mocks
def _files(self, matcher):
base = matcher.prefix
for path in self.mocks.get(base, []):
p = mozpath.join(base, path)
if matcher.match(p):
yield p
class TestProjectPaths(unittest.TestCase):
def test_l10n_path(self):
cfg = ProjectConfig()
cfg.locales.append('de')
cfg.add_paths({
'l10n': '{l10n_base}/{locale}/*'
})
cfg.add_environment(l10n_base='/tmp')
mocks = {
'/tmp/de/': [
'good.ftl',
'not/subdir/bad.ftl'
],
'/tmp/fr/': [
'good.ftl',
'not/subdir/bad.ftl'
],
}
files = MockProjectFiles(mocks, 'de', [cfg])
self.assertListEqual(
list(files), [('/tmp/de/good.ftl', None, None, set())])
self.assertTupleEqual(
files.match('/tmp/de/something.ftl'),
('/tmp/de/something.ftl', None, None, set()))
self.assertIsNone(files.match('/tmp/fr/something.ftl'))
files = MockProjectFiles(mocks, 'de', [cfg], mergebase='merging')
self.assertListEqual(
list(files),
[('/tmp/de/good.ftl', None, 'merging/de/good.ftl', set())])
self.assertTupleEqual(
files.match('/tmp/de/something.ftl'),
('/tmp/de/something.ftl', None, 'merging/de/something.ftl', set()))
# 'fr' is not in the locale list, should return no files
files = MockProjectFiles(mocks, 'fr', [cfg])
self.assertListEqual(list(files), [])
def test_reference_path(self):
cfg = ProjectConfig()
cfg.locales.append('de')
cfg.add_paths({
'l10n': '{l10n_base}/{locale}/*',
'reference': '/tmp/reference/*'
})
cfg.add_environment(l10n_base='/tmp/l10n')
mocks = {
'/tmp/l10n/de/': [
'good.ftl',
'not/subdir/bad.ftl'
],
'/tmp/l10n/fr/': [
'good.ftl',
'not/subdir/bad.ftl'
],
'/tmp/reference/': [
'ref.ftl',
'not/subdir/bad.ftl'
],
}
files = MockProjectFiles(mocks, 'de', [cfg])
self.assertListEqual(
list(files),
[
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl', None,
set()),
('/tmp/l10n/de/ref.ftl', '/tmp/reference/ref.ftl', None,
set()),
])
self.assertTupleEqual(
files.match('/tmp/l10n/de/good.ftl'),
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl', None,
set()),
)
self.assertTupleEqual(
files.match('/tmp/reference/good.ftl'),
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl', None,
set()),
)
self.assertIsNone(files.match('/tmp/l10n/de/subdir/bad.ftl'))
self.assertIsNone(files.match('/tmp/reference/subdir/bad.ftl'))
files = MockProjectFiles(mocks, 'de', [cfg], mergebase='merging')
self.assertListEqual(
list(files),
[
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl',
'merging/de/good.ftl', set()),
('/tmp/l10n/de/ref.ftl', '/tmp/reference/ref.ftl',
'merging/de/ref.ftl', set()),
])
self.assertTupleEqual(
files.match('/tmp/l10n/de/good.ftl'),
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl',
'merging/de/good.ftl', set()),
)
self.assertTupleEqual(
files.match('/tmp/reference/good.ftl'),
('/tmp/l10n/de/good.ftl', '/tmp/reference/good.ftl',
'merging/de/good.ftl', set()),
)
# 'fr' is not in the locale list, should return no files
files = MockProjectFiles(mocks, 'fr', [cfg])
self.assertListEqual(list(files), [])
def test_partial_l10n(self):
cfg = ProjectConfig()
cfg.locales.extend(['de', 'fr'])
cfg.add_paths({
'l10n': '/tmp/{locale}/major/*'
}, {
'l10n': '/tmp/{locale}/minor/*',
'locales': ['de']
})
mocks = {
'/tmp/de/major/': [
'good.ftl',
'not/subdir/bad.ftl'
],
'/tmp/de/minor/': [
'good.ftl',
],
'/tmp/fr/major/': [
'good.ftl',
'not/subdir/bad.ftl'
],
'/tmp/fr/minor/': [
'good.ftl',
],
}
files = MockProjectFiles(mocks, 'de', [cfg])
self.assertListEqual(
list(files),
[
('/tmp/de/major/good.ftl', None, None, set()),
('/tmp/de/minor/good.ftl', None, None, set()),
])
self.assertTupleEqual(
files.match('/tmp/de/major/some.ftl'),
('/tmp/de/major/some.ftl', None, None, set()))
self.assertIsNone(files.match('/tmp/de/other/some.ftl'))
# 'fr' is not in the locale list of minor, should only return major
files = MockProjectFiles(mocks, 'fr', [cfg])
self.assertListEqual(
list(files),
[
('/tmp/fr/major/good.ftl', None, None, set()),
])
self.assertIsNone(files.match('/tmp/fr/minor/some.ftl'))
class TestProjectConfig(unittest.TestCase):
def test_expand_paths(self):
pc = ProjectConfig()
pc.add_environment(one="first_path")
self.assertEqual(pc.expand('foo'), 'foo')
self.assertEqual(pc.expand('foo{one}bar'), 'foofirst_pathbar')
pc.add_environment(l10n_base='../tmp/localizations')
self.assertEqual(
pc.expand('{l}dir', {'l': '{l10n_base}/{locale}/'}),
'../tmp/localizations/{locale}/dir')
self.assertEqual(
pc.expand('{l}dir', {
'l': '{l10n_base}/{locale}/',
'l10n_base': '../merge-base'
}),
'../merge-base/{locale}/dir')
def test_children(self):
pc = ProjectConfig()
child = ProjectConfig()
pc.add_child(child)
self.assertListEqual([pc, child], list(pc.configs))


@ -146,5 +146,6 @@ escaped value
self.assertEqual(two.value_position(-1), (3, 14))
self.assertEqual(two.value_position(10), (3, 3))
if __name__ == '__main__':
unittest.main()


@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from compare_locales import webapps
class TestFileComparison(unittest.TestCase):
def mock_FileComparison(self, mock_listdir):
class Target(webapps.FileComparison):
def _listdir(self):
return mock_listdir()
return Target('.', 'en-US')
def test_just_reference(self):
def _listdir():
return ['my_app.en-US.properties']
filecomp = self.mock_FileComparison(_listdir)
filecomp.files()
self.assertEqual(filecomp.locales(), [])
self.assertEqual(filecomp._reference.keys(), ['my_app'])
file_ = filecomp._reference['my_app']
self.assertEqual(file_.file, 'locales/my_app.en-US.properties')
def test_just_locales(self):
def _listdir():
return ['my_app.ar.properties',
'my_app.sr-Latn.properties',
'my_app.sv-SE.properties',
'my_app.po_SI.properties']
filecomp = self.mock_FileComparison(_listdir)
filecomp.files()
self.assertEqual(filecomp.locales(),
['ar', 'sr-Latn', 'sv-SE'])
self.assertEqual(filecomp._files['ar'].keys(), ['my_app'])
file_ = filecomp._files['ar']['my_app']
self.assertEqual(file_.file, 'locales/my_app.ar.properties')


@ -1,235 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''gaia-style web apps support
This variant supports manifest.webapp localization as well as
.properties files with a naming scheme of locales/foo.*.properties.
'''
from collections import defaultdict
import json
import os
import os.path
import re
from compare_locales.paths import File, EnumerateDir
from compare_locales.compare import AddRemove, ContentComparer
class WebAppCompare(object):
'''For a given directory, analyze
/manifest.webapp
/locales/*.*.properties
Deduce the present locale codes.
'''
ignore_dirs = EnumerateDir.ignore_dirs
reference_locale = 'en-US'
def __init__(self, basedir):
'''Constructor
:param basedir: Directory of the web app to inspect
'''
self.basedir = basedir
self.manifest = Manifest(basedir, self.reference_locale)
self.files = FileComparison(basedir, self.reference_locale)
self.watcher = None
def compare(self, locales):
'''Compare the manifest.webapp and the locales/*.*.properties
'''
if not locales:
locales = self.locales()
self.manifest.compare(locales)
self.files.compare(locales)
def setWatcher(self, watcher):
self.watcher = watcher
self.manifest.watcher = watcher
self.files.watcher = watcher
def locales(self):
'''Inspect files on disk to find present languages.
:rtype: List of locales, sorted, including reference.
'''
locales = set(self.manifest.strings.keys())
locales.update(self.files.locales())
locales = list(sorted(locales))
return locales
class Manifest(object):
'''Class that helps with parsing and inspection of manifest.webapp.
'''
def __init__(self, basedir, reference_locale):
self.file = File(os.path.join(basedir, 'manifest.webapp'),
'manifest.webapp')
self.reference_locale = reference_locale
self._strings = None
self.watcher = None
@property
def strings(self):
if self._strings is None:
self._strings = self.load_and_parse()
return self._strings
def load_and_parse(self):
try:
manifest = json.load(open(self.file.fullpath))
except (ValueError, IOError), e:
if self.watcher:
self.watcher.notify('error', self.file, str(e))
return {}
return self.extract_manifest_strings(manifest)
def extract_manifest_strings(self, manifest_fragment):
'''Extract localizable strings from a manifest dict.
This method is recursive, and returns a two-level dict,
first level being locale codes, second level being generated
key and localized value. Keys are generated by concatenating
each level in the json with a ".".
'''
rv = defaultdict(dict)
localizable = manifest_fragment.pop('locales', {})
if localizable:
for locale, keyvalue in localizable.iteritems():
for key, value in keyvalue.iteritems():
key = '.'.join(['locales', 'AB_CD', key])
rv[locale][key] = value
for key, sub_manifest in manifest_fragment.iteritems():
if not isinstance(sub_manifest, dict):
continue
subdict = self.extract_manifest_strings(sub_manifest)
if subdict:
for locale, keyvalue in subdict.iteritems():
rv[locale].update((key + '.' + subkey, value)
for subkey, value
in keyvalue.iteritems())
return rv
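# For illustration (hypothetical manifest data, not from this module):
# passing {'name': 'App', 'locales': {'de': {'name': 'Anwendung'}}} through
# extract_manifest_strings yields {'de': {'locales.AB_CD.name': 'Anwendung'}};
# the locale code inside the generated key is normalized to AB_CD.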
def compare(self, locales):
strings = self.strings
if not strings:
return
# create a copy so that we can muck around with it
strings = strings.copy()
reference = strings.pop(self.reference_locale)
for locale in locales:
if locale == self.reference_locale:
continue
self.compare_strings(reference,
strings.get(locale, {}),
locale)
def compare_strings(self, reference, l10n, locale):
add_remove = AddRemove()
add_remove.set_left(sorted(reference.keys()))
add_remove.set_right(sorted(l10n.keys()))
missing = obsolete = changed = unchanged = 0
for op, item_or_pair in add_remove:
if op == 'equal':
if reference[item_or_pair[0]] == l10n[item_or_pair[1]]:
unchanged += 1
else:
changed += 1
else:
key = item_or_pair.replace('.AB_CD.',
'.%s.' % locale)
if op == 'add':
# obsolete entry
obsolete += 1
self.watcher.notify('obsoleteEntity', self.file, key)
else:
# missing entry
missing += 1
self.watcher.notify('missingEntity', self.file, key)
class FileComparison(object):
'''Compare the locales/*.*.properties files inside a webapp.
'''
prop = re.compile('(?P<base>.*)\\.'
'(?P<locale>[a-zA-Z]+(?:-[a-zA-Z]+)*)'
'\\.properties$')
def __init__(self, basedir, reference_locale):
self.basedir = basedir
self.reference_locale = reference_locale
self.watcher = None
self._reference = self._files = None
def locales(self):
'''Get the locales present in the webapp
'''
self.files()
locales = self._files.keys()
locales.sort()
return locales
def compare(self, locales):
self.files()
for locale in locales:
l10n = self._files[locale]
filecmp = AddRemove()
filecmp.set_left(sorted(self._reference.keys()))
filecmp.set_right(sorted(l10n.keys()))
for op, item_or_pair in filecmp:
if op == 'equal':
self.watcher.compare(self._reference[item_or_pair[0]],
l10n[item_or_pair[1]])
elif op == 'add':
# obsolete file
self.watcher.remove(l10n[item_or_pair])
else:
# missing file
_path = '.'.join([item_or_pair, locale, 'properties'])
missingFile = File(
os.path.join(self.basedir, 'locales', _path),
'locales/' + _path)
self.watcher.add(self._reference[item_or_pair],
missingFile)
def files(self):
'''Read the list of locales from disk.
'''
if self._reference:
return
self._reference = {}
self._files = defaultdict(dict)
path_list = self._listdir()
for path in path_list:
match = self.prop.match(path)
if match is None:
continue
locale = match.group('locale')
if locale == self.reference_locale:
target = self._reference
else:
target = self._files[locale]
fullpath = os.path.join(self.basedir, 'locales', path)
target[match.group('base')] = File(fullpath, 'locales/' + path)
def _listdir(self):
'Monkey-patch this for testing.'
return os.listdir(os.path.join(self.basedir, 'locales'))
def compare_web_app(basedir, locales, other_observer=None):
'''Compare gaia-style web app.
Optional arguments are:
- other_observer. An object implementing
notify(category, _file, data)
The return values of that callback are ignored.
'''
comparer = ContentComparer()
if other_observer is not None:
comparer.add_observer(other_observer)
webapp_comp = WebAppCompare(basedir)
webapp_comp.setWatcher(comparer)
webapp_comp.compare(locales)
return comparer.observer

16
third_party/python/fluent/PKG-INFO vendored Normal file

@ -0,0 +1,16 @@
Metadata-Version: 1.1
Name: fluent
Version: 0.4.2
Summary: Localization library for expressive translations.
Home-page: https://github.com/projectfluent/python-fluent
Author: Mozilla
Author-email: l10n-drivers@mozilla.org
License: APL 2
Description: UNKNOWN
Keywords: fluent,localization,l10n
Platform: UNKNOWN
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.5

0
third_party/python/fluent/fluent/__init__.py vendored Normal file

10
third_party/python/fluent/fluent/migrate/__init__.py vendored Normal file

@ -0,0 +1,10 @@
# coding=utf8
from .context import MergeContext # noqa: F401
from .transforms import ( # noqa: F401
Source, COPY, REPLACE_IN_TEXT, REPLACE, PLURALS, CONCAT
)
from .helpers import ( # noqa: F401
LITERAL, EXTERNAL_ARGUMENT, MESSAGE_REFERENCE
)
from .changesets import convert_blame_to_changesets # noqa: F401

58
third_party/python/fluent/fluent/migrate/changesets.py vendored Normal file

@ -0,0 +1,58 @@
# coding=utf8
import time
def by_first_commit(item):
"""Order two changesets by their first commit date."""
return item['first_commit']
def convert_blame_to_changesets(blame_json):
"""Convert a blame dict into a list of changesets.
The blame information in `blame_json` should be a dict of the following
structure:
{
'authors': [
'A.N. Author <author@example.com>',
],
'blame': {
'path/one': {
'key1': [0, 1346095921.0],
},
}
}
It will be transformed into a list of changesets which can be fed into
`MergeContext.serialize_changeset`:
[
{
'author': 'A.N. Author <author@example.com>',
'first_commit': 1346095921.0,
'changes': {
('path/one', 'key1'),
}
},
]
"""
now = time.time()
changesets = [
{
'author': author,
'first_commit': now,
'changes': set()
} for author in blame_json['authors']
]
for path, keys_info in blame_json['blame'].items():
for key, (author_index, timestamp) in keys_info.items():
changeset = changesets[author_index]
changeset['changes'].add((path, key))
if timestamp < changeset['first_commit']:
changeset['first_commit'] = timestamp
return sorted(changesets, key=by_first_commit)
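# A minimal usage sketch (assumed data, taken from the docstring above):
if __name__ == '__main__':
    sample_blame = {
        'authors': [
            'A.N. Author <author@example.com>',
        ],
        'blame': {
            'path/one': {
                'key1': [0, 1346095921.0],
            },
        },
    }
    changesets = convert_blame_to_changesets(sample_blame)
    # One changeset per author; 'first_commit' drops from time.time() to
    # the oldest blamed timestamp, and 'changes' collects (path, key) pairs.
    assert changesets[0]['author'] == 'A.N. Author <author@example.com>'
    assert changesets[0]['first_commit'] == 1346095921.0
    assert changesets[0]['changes'] == {('path/one', 'key1')}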

55
third_party/python/fluent/fluent/migrate/cldr.py vendored Normal file

@ -0,0 +1,55 @@
# coding=utf8
import pkgutil
import json
def in_canonical_order(item):
return canonical_order.index(item)
cldr_plurals = json.loads(
pkgutil.get_data('fluent.migrate', 'cldr_data/plurals.json').decode('utf-8')
)
rules = cldr_plurals['supplemental']['plurals-type-cardinal']
canonical_order = ('zero', 'one', 'two', 'few', 'many', 'other')
categories = {}
for lang, lang_rules in rules.items():
categories[lang] = tuple(sorted(map(
lambda key: key.replace('pluralRule-count-', ''),
lang_rules.keys()
), key=in_canonical_order))
def get_plural_categories(lang):
"""Return a tuple of CLDR plural categories for `lang`.
If an exact match for `lang` is not available, recursively fall back to
a language code with the last subtag stripped. That is, if `ja-JP-mac` is
not defined in CLDR, the code will try `ja-JP` and then `ja`.
If no matches are found, a `RuntimeError` is raised.
>>> get_plural_categories('sl')
('one', 'two', 'few', 'other')
>>> get_plural_categories('ga-IE')
('one', 'two', 'few', 'many', 'other')
>>> get_plural_categories('ja-JP-mac')
('other',)
"""
langs_categories = categories.get(lang, None)
if langs_categories is None:
# Remove the trailing subtag.
fallback_lang, _, _ = lang.rpartition('-')
if fallback_lang == '':
raise RuntimeError('Unknown language: {}'.format(lang))
return get_plural_categories(fallback_lang)
return langs_categories
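# A minimal fallback sketch (hypothetical locale code): 'de-DE-x-test' is
# not in the CLDR data, so the lookup recurses via 'de-DE-x' and 'de-DE'
# down to 'de', which the bundled plurals.json defines as one/other.
if __name__ == '__main__':
    assert get_plural_categories('de') == ('one', 'other')
    assert get_plural_categories('de-DE-x-test') == ('one', 'other')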

857
third_party/python/fluent/fluent/migrate/cldr_data/plurals.json vendored Normal file

@ -0,0 +1,857 @@
{
"supplemental": {
"version": {
"_number": "$Revision: 12805 $",
"_unicodeVersion": "9.0.0",
"_cldrVersion": "30"
},
"plurals-type-cardinal": {
"af": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ak": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"am": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ar": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ars": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"as": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"asa": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ast": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"az": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"be": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..4 and n % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 2.0, 3.0, 4.0, 22.0, 23.0, 24.0, 32.0, 33.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …"
},
"bem": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bez": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bh": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bm": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"br": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11,71,91 @integer 1, 21, 31, 41, 51, 61, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-two": "n % 10 = 2 and n % 100 != 12,72,92 @integer 2, 22, 32, 42, 52, 62, 82, 102, 1002, … @decimal 2.0, 22.0, 32.0, 42.0, 52.0, 62.0, 82.0, 102.0, 1002.0, …",
"pluralRule-count-few": "n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 @integer 3, 4, 9, 23, 24, 29, 33, 34, 39, 43, 44, 49, 103, 1003, … @decimal 3.0, 4.0, 9.0, 23.0, 24.0, 29.0, 33.0, 34.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n != 0 and n % 1000000 = 0 @integer 1000000, … @decimal 1000000.0, 1000000.00, 1000000.000, …",
"pluralRule-count-other": " @integer 0, 5~8, 10~20, 100, 1000, 10000, 100000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, …"
},
"brx": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bs": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ca": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ce": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"cgg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"chr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ckb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"cs": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"cy": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3 @integer 3 @decimal 3.0, 3.00, 3.000, 3.0000",
"pluralRule-count-many": "n = 6 @integer 6 @decimal 6.0, 6.00, 6.000, 6.0000",
"pluralRule-count-other": " @integer 4, 5, 7~20, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"da": {
"pluralRule-count-one": "n = 1 or t != 0 and i = 0,1 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0~3.4, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"de": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dv": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dz": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ee": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"el": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"en": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"es": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"et": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fa": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ff": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fil": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"fo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fr": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fur": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fy": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ga": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3..6 @integer 3~6 @decimal 3.0, 4.0, 5.0, 6.0, 3.00, 4.00, 5.00, 6.00, 3.000, 4.000, 5.000, 6.000, 3.0000, 4.0000, 5.0000, 6.0000",
"pluralRule-count-many": "n = 7..10 @integer 7~10 @decimal 7.0, 8.0, 9.0, 10.0, 7.00, 8.00, 9.00, 10.00, 7.000, 8.000, 9.000, 10.000, 7.0000, 8.0000, 9.0000, 10.0000",
"pluralRule-count-other": " @integer 0, 11~25, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gd": {
"pluralRule-count-one": "n = 1,11 @integer 1, 11 @decimal 1.0, 11.0, 1.00, 11.00, 1.000, 11.000, 1.0000",
"pluralRule-count-two": "n = 2,12 @integer 2, 12 @decimal 2.0, 12.0, 2.00, 12.00, 2.000, 12.000, 2.0000",
"pluralRule-count-few": "n = 3..10,13..19 @integer 3~10, 13~19 @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 3.00",
"pluralRule-count-other": " @integer 0, 20~34, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gsw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"guw": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gv": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 @integer 1, 11, 21, 31, 41, 51, 61, 71, 101, 1001, …",
"pluralRule-count-two": "v = 0 and i % 10 = 2 @integer 2, 12, 22, 32, 42, 52, 62, 72, 102, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 0,20,40,60,80 @integer 0, 20, 40, 60, 80, 100, 120, 140, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 3~10, 13~19, 23, 103, 1003, …"
},
"ha": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"haw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"he": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hy": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"id": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ig": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ii": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"in": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"is": {
"pluralRule-count-one": "t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1~1.6, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"it": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ja": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jbo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ji": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jmc": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jv": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jw": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ka": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kab": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kaj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kcg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kde": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kea": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kkj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kl": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"km": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ko": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ks": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksh": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ku": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ky": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lag": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "i = 0,1 and n != 0 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lkt": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ln": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lt": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11..19 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..9 and n % 100 != 11..19 @integer 2~9, 22~29, 102, 1002, … @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 22.0, 102.0, 1002.0, …",
"pluralRule-count-many": "f != 0 @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lv": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"mas": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mg": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 or f % 10 = 1 @integer 1, 11, 21, 31, 41, 51, 61, 71, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~10, 12~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.2~1.0, 1.2~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ml": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mo": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n != 1 and n % 100 = 1..19 @integer 0, 2~16, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"mr": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ms": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mt": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-few": "n = 0 or n % 100 = 2..10 @integer 0, 2~10, 102~107, 1002, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 100 = 11..19 @integer 11~19, 111~117, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"my": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nah": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"naq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nd": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ne": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nnh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"no": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nqo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nso": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ny": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nyn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"om": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"or": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"os": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pap": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"prg": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"ps": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt": {
"pluralRule-count-one": "n = 0..2 and n != 2 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt-PT": {
"pluralRule-count-one": "n = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rm": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ro": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n != 1 and n % 100 = 1..19 @integer 0, 2~16, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"rof": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"root": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ru": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rwk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sah": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"saq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sdh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"se": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"seh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ses": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sg": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sh": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"shi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-few": "n = 2..10 @integer 2~10 @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00",
"pluralRule-count-other": " @integer 11~26, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~1.9, 2.1~2.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"si": {
"pluralRule-count-one": "n = 0,1 or i = 0 and f = 1 @integer 0, 1 @decimal 0.0, 0.1, 1.0, 0.00, 0.01, 1.00, 0.000, 0.001, 1.000, 0.0000, 0.0001, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.2~0.9, 1.1~1.8, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sk": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sl": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or v != 0 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sma": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smi": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sms": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"so": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ss": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ssy": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"st": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sv": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"syr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ta": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"te": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"teo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"th": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ti": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tig": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tl": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"tn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"to": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ts": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tzm": {
"pluralRule-count-one": "n = 0..1 or n = 11..99 @integer 0, 1, 11~24 @decimal 0.0, 1.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0",
"pluralRule-count-other": " @integer 2~10, 100~106, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ug": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ur": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uz": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ve": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vi": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vun": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wae": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xog": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yue": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zh": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
}
}
}
}
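
For orientation, a minimal sketch (not the vendored fluent.migrate.cldr implementation) of how a helper like the get_plural_categories imported in fluent/migrate/context.py below can derive category names from this data; the 'supplemental'/'plurals-type-cardinal' keys follow the JSON layout above, while the file path is a placeholder:

import json
from collections import OrderedDict

def get_plural_categories(lang, path='plurals.json'):
    with open(path) as f:
        # object_pairs_hook preserves the CLDR category order
        # (zero, one, two, few, many, other).
        data = json.load(f, object_pairs_hook=OrderedDict)
    rules = data['supplemental']['plurals-type-cardinal']
    if lang not in rules:
        raise RuntimeError('Unknown locale: {}'.format(lang))
    prefix = 'pluralRule-count-'
    return tuple(key[len(prefix):] for key in rules[lang])

# e.g. get_plural_categories('pl') -> ('one', 'few', 'many', 'other')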


@@ -0,0 +1,53 @@
UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/,
and http://www.unicode.org/utility/trac/browser/.
Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.
Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/,
and http://www.unicode.org/utility/trac/browser/.
NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA
FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT,
AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE
DATA FILES OR SOFTWARE.
COPYRIGHT AND PERMISSION NOTICE
Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under
the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining a copy
of the Unicode data files and any associated documentation (the "Data Files")
or Unicode software and any associated documentation (the "Software") to deal
in the Data Files or Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
and/or sell copies of the Data Files or Software, and to permit persons to
whom the Data Files or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies of the Data
Files or Software, or
(b) this copyright and permission notice appear in associated Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR
CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder shall not
be used in advertising or otherwise to promote the sale, use or other
dealings in these Data Files or Software without prior written authorization
of the copyright holder.

third_party/python/fluent/fluent/migrate/context.py (vendored, new file)

@@ -0,0 +1,270 @@
# coding=utf8
from __future__ import unicode_literals
import os
import codecs
import logging
import fluent.syntax.ast as FTL
from fluent.syntax.parser import FluentParser
from fluent.syntax.serializer import FluentSerializer
from fluent.util import fold
try:
from compare_locales.parser import getParser
except ImportError:
def getParser(path):
raise RuntimeError('compare-locales required')
from .cldr import get_plural_categories
from .transforms import Source
from .merge import merge_resource
from .util import get_message
class MergeContext(object):
"""Stateful context for merging translation resources.
`MergeContext` must be configured with the target language and the
directory locations of the input data.
The transformation takes four types of input data:
- The en-US FTL reference files which will be used as templates for
message order, comments and sections.
- The current FTL files for the given language.
- The legacy (DTD, properties) translation files for the given
language. The translations from these files will be transformed
into FTL and merged into the existing FTL files for this language.
- A list of `FTL.Message` objects some of whose nodes are special
helper or transform nodes:
helpers: LITERAL, EXTERNAL_ARGUMENT, MESSAGE_REFERENCE
transforms: COPY, REPLACE_IN_TEXT, REPLACE, PLURALS, CONCAT
"""
def __init__(self, lang, reference_dir, localization_dir):
self.fluent_parser = FluentParser(with_spans=False)
self.fluent_serializer = FluentSerializer()
# An iterable of plural category names relevant to the context's
# language. E.g. ('one', 'other') for English.
        try:
            self.plural_categories = get_plural_categories(lang)
        except RuntimeError as e:
            print(e)
            # Fall back to the English plural categories rather than the
            # bare locale code; consumers expect an iterable of category
            # names, not a string.
            self.plural_categories = ('one', 'other')
# Paths to directories with input data, relative to CWD.
self.reference_dir = reference_dir
self.localization_dir = localization_dir
# Parsed input resources stored by resource path.
self.reference_resources = {}
self.localization_resources = {}
# An iterable of `FTL.Message` objects some of whose nodes can be the
# transform operations.
self.transforms = {}
# A dict whose keys are `(path, key)` tuples corresponding to target
# FTL translations, and values are sets of `(path, key)` tuples
# corresponding to localized entities which will be migrated.
self.dependencies = {}
def read_ftl_resource(self, path):
"""Read an FTL resource and parse it into an AST."""
f = codecs.open(path, 'r', 'utf8')
try:
contents = f.read()
finally:
f.close()
ast = self.fluent_parser.parse(contents)
annots = [
annot
for entry in ast.body
for annot in entry.annotations
]
if len(annots):
logger = logging.getLogger('migrate')
for annot in annots:
msg = annot.message
logger.warn(u'Syntax error in {}: {}'.format(path, msg))
return ast
def read_legacy_resource(self, path):
"""Read a legacy resource and parse it into a dict."""
parser = getParser(path)
parser.readFile(path)
# Transform the parsed result which is an iterator into a dict.
return {entity.key: entity.val for entity in parser}
def add_reference(self, path, realpath=None):
"""Add an FTL AST to this context's reference resources."""
fullpath = os.path.join(self.reference_dir, realpath or path)
try:
ast = self.read_ftl_resource(fullpath)
except IOError as err:
logger = logging.getLogger('migrate')
logger.error(u'Missing reference file: {}'.format(path))
raise err
except UnicodeDecodeError as err:
logger = logging.getLogger('migrate')
logger.error(u'Error reading file {}: {}'.format(path, err))
raise err
else:
self.reference_resources[path] = ast
def add_localization(self, path):
"""Add an existing localization resource.
If it's an FTL resource, add an FTL AST. Otherwise, it's a legacy
resource. Use a compare-locales parser to create a dict of (key,
string value) tuples.
"""
fullpath = os.path.join(self.localization_dir, path)
if fullpath.endswith('.ftl'):
try:
ast = self.read_ftl_resource(fullpath)
except IOError:
logger = logging.getLogger('migrate')
logger.warn(u'Missing localization file: {}'.format(path))
except UnicodeDecodeError as err:
logger = logging.getLogger('migrate')
logger.warn(u'Error reading file {}: {}'.format(path, err))
else:
self.localization_resources[path] = ast
else:
try:
collection = self.read_legacy_resource(fullpath)
except IOError:
logger = logging.getLogger('migrate')
logger.warn(u'Missing localization file: {}'.format(path))
else:
self.localization_resources[path] = collection
def add_transforms(self, path, transforms):
"""Define transforms for path.
Each transform is an extended FTL node with `Transform` nodes as some
values. Transforms are stored in their lazy AST form until
`merge_changeset` is called, at which point they are evaluated to real
FTL nodes with migrated translations.
Each transform is scanned for `Source` nodes which will be used to
build the list of dependencies for the transformed message.
"""
def get_sources(acc, cur):
if isinstance(cur, Source):
acc.add((cur.path, cur.key))
return acc
for node in transforms:
# Scan `node` for `Source` nodes and collect the information they
# store into a set of dependencies.
dependencies = fold(get_sources, node, set())
# Set these sources as dependencies for the current transform.
self.dependencies[(path, node.id.name)] = dependencies
path_transforms = self.transforms.setdefault(path, [])
path_transforms += transforms
def get_source(self, path, key):
"""Get an entity value from the localized source.
Used by the `Source` transform.
"""
if path.endswith('.ftl'):
resource = self.localization_resources[path]
return get_message(resource.body, key)
else:
resource = self.localization_resources[path]
return resource.get(key, None)
def merge_changeset(self, changeset=None):
"""Return a generator of FTL ASTs for the changeset.
The input data must be configured earlier using the `add_*` methods.
        If given, `changeset` must be a set of (path, key) tuples describing
which legacy translations are to be merged.
Given `changeset`, return a dict whose keys are resource paths and
values are `FTL.Resource` instances. The values will also be used to
update this context's existing localization resources.
"""
if changeset is None:
# Merge all known legacy translations.
changeset = {
(path, key)
for path, strings in self.localization_resources.iteritems()
for key in strings.iterkeys()
}
for path, reference in self.reference_resources.iteritems():
current = self.localization_resources.get(path, FTL.Resource())
transforms = self.transforms.get(path, [])
def in_changeset(ident):
"""Check if entity should be merged.
If at least one dependency of the entity is in the current
                changeset, merge it.
"""
message_deps = self.dependencies.get((path, ident), None)
# Don't merge if we don't have a transform for this message.
if message_deps is None:
return False
# As a special case, if a transform exists but has no
                # dependencies, it's a hardcoded `FTL.Node` which doesn't
# migrate any existing translation but rather creates a new
# one. Merge it.
if len(message_deps) == 0:
return True
# If the intersection of the dependencies and the current
# changeset is non-empty, merge this message.
return message_deps & changeset
# Merge legacy translations with the existing ones using the
# reference as a template.
snapshot = merge_resource(
self, reference, current, transforms, in_changeset
)
# If none of the transforms is in the given changeset, the merged
# snapshot is identical to the current translation. We compare
            # JSON trees rather than use filtering by `in_changeset` to account
# for translations removed from `reference`.
if snapshot.to_json() == current.to_json():
continue
# Store the merged snapshot on the context so that the next merge
# already takes it into account as the existing localization.
self.localization_resources[path] = snapshot
# The result for this path is a complete `FTL.Resource`.
yield path, snapshot
def serialize_changeset(self, changeset):
"""Return a dict of serialized FTLs for the changeset.
Given `changeset`, return a dict whose keys are resource paths and
values are serialized FTL snapshots.
"""
return {
path: self.fluent_serializer.serialize(snapshot)
for path, snapshot in self.merge_changeset(changeset)
}
logging.basicConfig()
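
A hedged usage sketch of MergeContext; the directory layout, resource paths, message id and DTD key are all hypothetical, and a real recipe would come from a migration spec:

import fluent.syntax.ast as FTL
from fluent.migrate.context import MergeContext
from fluent.migrate.transforms import COPY

ctx = MergeContext(
    lang='pl',
    reference_dir='templates',      # hypothetical en-US FTL templates
    localization_dir='locales/pl',  # hypothetical l10n checkout
)
ctx.add_reference('about_dialog.ftl')
ctx.add_localization('aboutDialog.dtd')
ctx.add_transforms('about_dialog.ftl', [
    FTL.Message(
        id=FTL.Identifier('update-failed'),
        value=COPY('aboutDialog.dtd', 'update.failed'),
    ),
])
# Merge all known legacy translations and print serialized snapshots.
for path, text in ctx.serialize_changeset(None).items():
    print(path)
    print(text)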

third_party/python/fluent/fluent/migrate/helpers.py (vendored, new file)

@@ -0,0 +1,35 @@
# coding=utf8
"""Fluent AST helpers.
The functions defined in this module offer a shorthand for defining common AST
nodes.
They take a string argument and immediately return a corresponding AST node.
(As opposed to Transforms which are AST nodes on their own and only return the
migrated AST nodes when they are evaluated by a MergeContext.)
"""
from __future__ import unicode_literals
import fluent.syntax.ast as FTL
def LITERAL(value):
"""Create a Pattern with a single TextElement."""
elements = [FTL.TextElement(value)]
return FTL.Pattern(elements)
def EXTERNAL_ARGUMENT(name):
"""Create an ExternalArgument expression."""
return FTL.ExternalArgument(
id=FTL.Identifier(name)
)
def MESSAGE_REFERENCE(name):
"""Create a MessageReference expression."""
return FTL.MessageReference(
id=FTL.Identifier(name)
)
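
A small illustration of these helpers; the message id and argument name are made up:

import fluent.syntax.ast as FTL
from fluent.migrate.helpers import LITERAL, EXTERNAL_ARGUMENT

# Hand-built equivalent of the FTL message:  welcome = Hello, { $user }!
msg = FTL.Message(
    id=FTL.Identifier('welcome'),
    value=FTL.Pattern([
        FTL.TextElement('Hello, '),
        FTL.Placeable(EXTERNAL_ARGUMENT('user')),
        FTL.TextElement('!'),
    ]),
)

# LITERAL is the shorthand for plain-text values:
plain = LITERAL('Hello, world!')  # an FTL.Pattern with one TextElement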

third_party/python/fluent/fluent/migrate/merge.py (vendored, new file)

@@ -0,0 +1,58 @@
# coding=utf8
from __future__ import unicode_literals
import fluent.syntax.ast as FTL
from .transforms import evaluate
from .util import get_message, get_transform
def merge_resource(ctx, reference, current, transforms, in_changeset):
"""Transform legacy translations into FTL.
Use the `reference` FTL AST as a template. For each en-US string in the
reference, first check if it's in the currently processed changeset with
`in_changeset`; then check for an existing translation in the current FTL
`localization` or for a migration specification in `transforms`.
"""
def merge_body(body):
return [
entry
for entry in map(merge_entry, body)
if entry is not None
]
def merge_entry(entry):
# All standalone comments will be merged.
if isinstance(entry, FTL.Comment):
return entry
# All section headers will be merged.
if isinstance(entry, FTL.Section):
return entry
# Ignore Junk
if isinstance(entry, FTL.Junk):
return None
ident = entry.id.name
# If the message is present in the existing localization, we add it to
# the resulting resource. This ensures consecutive merges don't remove
# translations but rather create supersets of them.
existing = get_message(current.body, ident)
if existing is not None:
return existing
transform = get_transform(transforms, ident)
# Make sure this message is supposed to be migrated as part of the
# current changeset.
if transform is not None and in_changeset(ident):
if transform.comment is None:
transform.comment = entry.comment
return evaluate(ctx, transform)
body = merge_body(reference.body)
return FTL.Resource(body, reference.comment)
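
For intuition, a minimal sketch of the precedence merge_entry implements: an existing translation wins, a transform in the changeset comes second, and anything else is dropped. The strings are made up; no transforms are supplied, so the context argument may be None:

import fluent.syntax.ast as FTL
from fluent.syntax.parser import FluentParser
from fluent.migrate.merge import merge_resource

parser = FluentParser(with_spans=False)
reference = parser.parse('title = Title\nsubtitle = Subtitle\n')
current = parser.parse('title = Tytul\n')

# Only the already-translated `title` survives; `subtitle` has neither
# a translation nor a transform and is dropped from the snapshot.
snapshot = merge_resource(None, reference, current, [], lambda ident: False)
print([entry.id.name for entry in snapshot.body
       if isinstance(entry, FTL.Message)])  # ['title']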

third_party/python/fluent/fluent/migrate/transforms.py (vendored, new file)

@@ -0,0 +1,315 @@
# coding=utf8
"""Migration Transforms.
Transforms are AST nodes which describe how legacy translations should be
migrated. They are created inert and only return the migrated AST nodes when
they are evaluated by a MergeContext.
All Transforms evaluate to Fluent Patterns. This makes them suitable for
defining migrations of values of messages, attributes and variants. The special
CONCAT Transform is capable of joining multiple Patterns returned by evaluating
other Transforms into a single Pattern. It can also concatenate Fluent
Expressions, like MessageReferences and ExternalArguments.
The COPY, REPLACE and PLURALS Transforms inherit from Source which is a special
AST Node defining the location (the file path and the id) of the legacy
translation. During the migration, the current MergeContext scans the
migration spec for Source nodes and extracts the information about all legacy
translations being migrated. Thus,
COPY('file.dtd', 'hello')
is equivalent to:
LITERAL(Source('file.dtd', 'hello'))
where LITERAL is a helper defined in the helpers.py module for creating Fluent
Patterns from the text passed as the argument.
The LITERAL helper and the special REPLACE_IN_TEXT Transforms are useful for
working with text rather than (path, key) source definitions. This is the case
when the migrated translation requires some hardcoded text, e.g. <a> and </a>
when multiple translations become a single one with a DOM overlay.
FTL.Message(
id=FTL.Identifier('update-failed'),
value=CONCAT(
COPY('aboutDialog.dtd', 'update.failed.start'),
LITERAL('<a>'),
COPY('aboutDialog.dtd', 'update.failed.linkText'),
LITERAL('</a>'),
COPY('aboutDialog.dtd', 'update.failed.end'),
)
)
The REPLACE_IN_TEXT Transform also takes text as input, making it possible to
pass it as the foreach function of the PLURALS Transform. In this case, each
slice of the plural string will be run through a REPLACE_IN_TEXT operation.
Those slices are strings, so a REPLACE(path, key, …) isn't suitable for them.
FTL.Message(
FTL.Identifier('delete-all'),
value=PLURALS(
'aboutDownloads.dtd',
'deleteAll',
EXTERNAL_ARGUMENT('num'),
lambda text: REPLACE_IN_TEXT(
text,
{
'#1': EXTERNAL_ARGUMENT('num')
}
)
)
)
"""
from __future__ import unicode_literals
import fluent.syntax.ast as FTL
from .helpers import LITERAL
def evaluate(ctx, node):
def eval_node(subnode):
if isinstance(subnode, Transform):
return subnode(ctx)
else:
return subnode
return node.traverse(eval_node)
class Transform(FTL.BaseNode):
def __call__(self, ctx):
raise NotImplementedError
class Source(Transform):
"""Declare the source translation to be migrated with other transforms.
When evaluated `Source` returns a simple string value. All \\uXXXX from
the original translations are converted beforehand to the literal
characters they encode.
HTML entities are left unchanged for now because we can't know if they
should be converted to the characters they represent or not. Consider the
following example in which `&amp;` could be replaced with the literal `&`:
Privacy &amp; History
vs. these two examples where the HTML encoding should be preserved:
Erreur&nbsp;!
Use /help &lt;command&gt; for more information.
"""
# XXX Perhaps there's a strict subset of HTML entities which must or must
# not be replaced?
def __init__(self, path, key):
self.path = path
self.key = key
def __call__(self, ctx):
return ctx.get_source(self.path, self.key)
class COPY(Source):
"""Create a Pattern with the translation value from the given source."""
def __call__(self, ctx):
source = super(self.__class__, self).__call__(ctx)
return LITERAL(source)
class REPLACE_IN_TEXT(Transform):
"""Replace various placeables in the translation with FTL placeables.
The original placeables are defined as keys on the `replacements` dict.
For each key the value is defined as a list of FTL Expressions to be
interpolated.
"""
def __init__(self, value, replacements):
self.value = value
self.replacements = replacements
def __call__(self, ctx):
        # Only replace placeables which are present in the translation.
replacements = {
key: evaluate(ctx, repl)
for key, repl in self.replacements.iteritems()
if key in self.value
}
# Order the original placeables by their position in the translation.
keys_in_order = sorted(
replacements.keys(),
lambda x, y: self.value.find(x) - self.value.find(y)
)
# Used to reduce the `keys_in_order` list.
def replace(acc, cur):
"""Convert original placeables and text into FTL Nodes.
For each original placeable the translation will be partitioned
around it and the text before it will be converted into an
`FTL.TextElement` and the placeable will be replaced with its
            replacement. The text following the placeable will be fed again to
the `replace` function.
"""
parts, rest = acc
before, key, after = rest.value.partition(cur)
placeable = FTL.Placeable(replacements[key])
# Return the elements found and converted so far, and the remaining
# text which hasn't been scanned for placeables yet.
return (
parts + [FTL.TextElement(before), placeable],
FTL.TextElement(after)
)
def is_non_empty(elem):
"""Used for filtering empty `FTL.TextElement` nodes out."""
return not isinstance(elem, FTL.TextElement) or len(elem.value)
# Start with an empty list of elements and the original translation.
init = ([], FTL.TextElement(self.value))
parts, tail = reduce(replace, keys_in_order, init)
# Explicitly concat the trailing part to get the full list of elements
# and filter out the empty ones.
elements = filter(is_non_empty, parts + [tail])
return FTL.Pattern(elements)
class REPLACE(Source):
"""Create a Pattern with interpolations from given source.
Interpolations in the translation value from the given source will be
replaced with FTL placeables using the `REPLACE_IN_TEXT` transform.
"""
def __init__(self, path, key, replacements):
super(self.__class__, self).__init__(path, key)
self.replacements = replacements
def __call__(self, ctx):
value = super(self.__class__, self).__call__(ctx)
return REPLACE_IN_TEXT(value, self.replacements)(ctx)
class PLURALS(Source):
"""Create a Pattern with plurals from given source.
Build an `FTL.SelectExpression` with the supplied `selector` and variants
extracted from the source. The source needs to be a semicolon-separated
list of variants. Each variant will be run through the `foreach` function,
which should return an `FTL.Node` or a `Transform`.
"""
def __init__(self, path, key, selector, foreach=LITERAL):
super(self.__class__, self).__init__(path, key)
self.selector = selector
self.foreach = foreach
def __call__(self, ctx):
value = super(self.__class__, self).__call__(ctx)
selector = evaluate(ctx, self.selector)
variants = value.split(';')
keys = ctx.plural_categories
last_index = min(len(variants), len(keys)) - 1
def createVariant(zipped_enum):
index, (key, variant) = zipped_enum
# Run the legacy variant through `foreach` which returns an
# `FTL.Node` describing the transformation required for each
# variant. Then evaluate it to a migrated FTL node.
value = evaluate(ctx, self.foreach(variant))
return FTL.Variant(
key=FTL.Symbol(key),
value=value,
default=index == last_index
)
select = FTL.SelectExpression(
expression=selector,
variants=map(createVariant, enumerate(zip(keys, variants)))
)
placeable = FTL.Placeable(select)
return FTL.Pattern([placeable])
class CONCAT(Transform):
"""Concatenate elements of many patterns."""
def __init__(self, *patterns):
self.patterns = list(patterns)
def __call__(self, ctx):
# Flatten the list of patterns of which each has a list of elements.
def concat_elements(acc, cur):
if isinstance(cur, FTL.Pattern):
acc.extend(cur.elements)
return acc
elif (isinstance(cur, FTL.TextElement) or
isinstance(cur, FTL.Placeable)):
acc.append(cur)
return acc
raise RuntimeError(
'CONCAT accepts FTL Patterns and Expressions.'
)
        # Merge adjacent `FTL.TextElement` nodes.
        def merge_adjacent_text(acc, cur):
if type(cur) == FTL.TextElement and len(acc):
last = acc[-1]
if type(last) == FTL.TextElement:
last.value += cur.value
else:
acc.append(cur)
else:
acc.append(cur)
return acc
elements = reduce(concat_elements, self.patterns, [])
        elements = reduce(merge_adjacent_text, elements, [])
return FTL.Pattern(elements)
def traverse(self, fun):
def visit(value):
if isinstance(value, FTL.BaseNode):
return value.traverse(fun)
if isinstance(value, list):
return fun(map(visit, value))
else:
return fun(value)
node = self.__class__(
*[
visit(value) for value in self.patterns
]
)
return fun(node)
def to_json(self):
def to_json(value):
if isinstance(value, FTL.BaseNode):
return value.to_json()
else:
return value
return {
'type': self.__class__.__name__,
'patterns': [
to_json(value) for value in self.patterns
]
}
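# The merge step above collapses adjacent text nodes; the same idea as a
# standalone sketch, assuming this 0.4.x package is importable (the
# element values are made up).
from functools import reduce
import fluent.syntax.ast as FTL

def example_merge(acc, cur):
    # Fold a TextElement into a preceding TextElement, else append.
    if (isinstance(cur, FTL.TextElement) and acc
            and isinstance(acc[-1], FTL.TextElement)):
        acc[-1].value += cur.value
    else:
        acc.append(cur)
    return acc

example_elements = [
    FTL.TextElement('Hello, '),
    FTL.TextElement('world '),
    FTL.Placeable(FTL.MessageReference(FTL.Identifier('brand-name'))),
]
print(len(reduce(example_merge, example_elements, [])))  # 2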

third_party/python/fluent/fluent/migrate/util.py vendored Normal file
@@ -0,0 +1,56 @@
# coding=utf8
from __future__ import unicode_literals
import fluent.syntax.ast as FTL
from fluent.syntax.parser import FluentParser
from fluent.util import ftl
fluent_parser = FluentParser(with_spans=False)
def parse(Parser, string):
if Parser is FluentParser:
return fluent_parser.parse(string)
# Parsing a legacy resource.
# Parse the string into the internal Context.
parser = Parser()
# compare-locales expects ASCII strings.
parser.readContents(string.encode('utf8'))
# Transform the parsed result which is an iterator into a dict.
return {ent.key: ent for ent in parser}
def ftl_resource_to_ast(code):
return fluent_parser.parse(ftl(code))
def ftl_resource_to_json(code):
return fluent_parser.parse(ftl(code)).to_json()
def ftl_message_to_json(code):
return fluent_parser.parse_entry(ftl(code)).to_json()
def to_json(merged_iter):
return {
path: resource.to_json()
for path, resource in merged_iter
}
def get_message(body, ident):
"""Get message called `ident` from the `body` iterable."""
for entity in body:
if isinstance(entity, FTL.Message) and entity.id.name == ident:
return entity
def get_transform(body, ident):
"""Get entity called `ident` from the `body` iterable."""
for transform in body:
if transform.id.name == ident:
return transform

third_party/python/fluent/fluent/syntax/__init__.py vendored Normal file
@@ -0,0 +1,12 @@
from .parser import FluentParser
from .serializer import FluentSerializer
def parse(source, **kwargs):
parser = FluentParser(**kwargs)
return parser.parse(source)
def serialize(resource, **kwargs):
serializer = FluentSerializer(**kwargs)
return serializer.serialize(resource)
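# A quick round trip through these two helpers, assuming the 0.4.2
# package is on the path; the message is made up.
from fluent.syntax import parse, serialize

resource = parse('hello = Hello, world!\n')
print(resource.body[0].id.name)  # hello
print(serialize(resource))       # hello = Hello, world!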

third_party/python/fluent/fluent/syntax/ast.py vendored Normal file
@@ -0,0 +1,309 @@
from __future__ import unicode_literals
import sys
import json
def to_json(value):
if isinstance(value, BaseNode):
return value.to_json()
if isinstance(value, list):
return list(map(to_json, value))
else:
return value
def from_json(value):
if isinstance(value, dict):
cls = getattr(sys.modules[__name__], value['type'])
args = {
k: from_json(v)
for k, v in value.items()
if k != 'type'
}
return cls(**args)
if isinstance(value, list):
return list(map(from_json, value))
else:
return value
def scalars_equal(node1, node2, ignored_fields):
"""Compare two nodes which are not lists."""
if type(node1) != type(node2):
return False
if isinstance(node1, BaseNode):
return node1.equals(node2, ignored_fields)
return node1 == node2
class BaseNode(object):
"""Base class for all Fluent AST nodes.
All productions described in the ASDL subclass BaseNode, including Span and
Annotation. Implements __str__, to_json and traverse.
"""
def traverse(self, fun):
"""Postorder-traverse this node and apply `fun` to all child nodes.
Traverse this node depth-first applying `fun` to subnodes and leaves.
Children are processed before parents (postorder traversal).
Return a new instance of the node.
"""
def visit(value):
"""Call `fun` on `value` and its descendants."""
if isinstance(value, BaseNode):
return value.traverse(fun)
if isinstance(value, list):
return fun(list(map(visit, value)))
else:
return fun(value)
node = self.__class__(
**{
name: visit(value)
for name, value in vars(self).items()
}
)
return fun(node)
def equals(self, other, ignored_fields=['span']):
"""Compare two nodes.
Nodes are deeply compared on a field by field basis. If possible, False
is returned early. When comparing attributes, tags and variants in
SelectExpressions, the order doesn't matter. By default, spans are not
taken into account.
"""
self_keys = set(vars(self).keys())
other_keys = set(vars(other).keys())
if ignored_fields:
for key in ignored_fields:
self_keys.discard(key)
other_keys.discard(key)
if self_keys != other_keys:
return False
for key in self_keys:
field1 = getattr(self, key)
field2 = getattr(other, key)
# List-typed nodes are compared item-by-item. When comparing
# attributes, tags and variants, the order of items doesn't matter.
if isinstance(field1, list) and isinstance(field2, list):
if len(field1) != len(field2):
return False
# These functions are used to sort lists of items for when
# order doesn't matter. Annotations are also lists but they
# can't be keyed on any of their fields reliably.
field_sorting = {
'attributes': lambda elem: elem.id.name,
'tags': lambda elem: elem.name.name,
'variants': lambda elem: elem.key.name,
}
if key in field_sorting:
sorting = field_sorting[key]
field1 = sorted(field1, key=sorting)
field2 = sorted(field2, key=sorting)
for elem1, elem2 in zip(field1, field2):
if not scalars_equal(elem1, elem2, ignored_fields):
return False
elif not scalars_equal(field1, field2, ignored_fields):
return False
return True
def to_json(self):
obj = {
name: to_json(value)
for name, value in vars(self).items()
}
obj.update(
{'type': self.__class__.__name__}
)
return obj
def __str__(self):
return json.dumps(self.to_json())
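# A short illustration of traverse and equals, assuming the 0.4.x
# package; the message is made up. traverse rebuilds the tree bottom-up,
# so the original resource is left untouched.
from fluent.syntax import parse
import fluent.syntax.ast as FTL

def shout(node):
    if isinstance(node, FTL.TextElement):
        node.value = node.value.upper()
    return node

res = parse('greeting = hello')
loud = res.traverse(shout)
print(loud.body[0].value.elements[0].value)  # HELLO
print(res.equals(loud))                      # False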
class SyntaxNode(BaseNode):
"""Base class for AST nodes which can have Spans."""
def __init__(self, span=None, **kwargs):
super(SyntaxNode, self).__init__(**kwargs)
self.span = span
def add_span(self, start, end):
self.span = Span(start, end)
class Resource(SyntaxNode):
def __init__(self, body=None, comment=None, **kwargs):
super(Resource, self).__init__(**kwargs)
self.body = body or []
self.comment = comment
class Entry(SyntaxNode):
def __init__(self, annotations=None, **kwargs):
super(Entry, self).__init__(**kwargs)
self.annotations = annotations or []
def add_annotation(self, annot):
self.annotations.append(annot)
class Message(Entry):
def __init__(self, id, value=None, attributes=None, tags=None,
comment=None, **kwargs):
super(Message, self).__init__(**kwargs)
self.id = id
self.value = value
self.attributes = attributes or []
self.tags = tags or []
self.comment = comment
class Pattern(SyntaxNode):
def __init__(self, elements, **kwargs):
super(Pattern, self).__init__(**kwargs)
self.elements = elements
class TextElement(SyntaxNode):
def __init__(self, value, **kwargs):
super(TextElement, self).__init__(**kwargs)
self.value = value
class Placeable(SyntaxNode):
def __init__(self, expression, **kwargs):
super(Placeable, self).__init__(**kwargs)
self.expression = expression
class Expression(SyntaxNode):
def __init__(self, **kwargs):
super(Expression, self).__init__(**kwargs)
class StringExpression(Expression):
def __init__(self, value, **kwargs):
super(StringExpression, self).__init__(**kwargs)
self.value = value
class NumberExpression(Expression):
def __init__(self, value, **kwargs):
super(NumberExpression, self).__init__(**kwargs)
self.value = value
class MessageReference(Expression):
def __init__(self, id, **kwargs):
super(MessageReference, self).__init__(**kwargs)
self.id = id
class ExternalArgument(Expression):
def __init__(self, id, **kwargs):
super(ExternalArgument, self).__init__(**kwargs)
self.id = id
class SelectExpression(Expression):
def __init__(self, expression, variants, **kwargs):
super(SelectExpression, self).__init__(**kwargs)
self.expression = expression
self.variants = variants
class AttributeExpression(Expression):
def __init__(self, id, name, **kwargs):
super(AttributeExpression, self).__init__(**kwargs)
self.id = id
self.name = name
class VariantExpression(Expression):
def __init__(self, id, key, **kwargs):
super(VariantExpression, self).__init__(**kwargs)
self.id = id
self.key = key
class CallExpression(Expression):
def __init__(self, callee, args, **kwargs):
super(CallExpression, self).__init__(**kwargs)
self.callee = callee
self.args = args
class Attribute(SyntaxNode):
def __init__(self, id, value, **kwargs):
super(Attribute, self).__init__(**kwargs)
self.id = id
self.value = value
class Tag(SyntaxNode):
def __init__(self, name, **kwargs):
super(Tag, self).__init__(**kwargs)
self.name = name
class Variant(SyntaxNode):
def __init__(self, key, value, default=False, **kwargs):
super(Variant, self).__init__(**kwargs)
self.key = key
self.value = value
self.default = default
class NamedArgument(SyntaxNode):
def __init__(self, name, val, **kwargs):
super(NamedArgument, self).__init__(**kwargs)
self.name = name
self.val = val
class Identifier(SyntaxNode):
def __init__(self, name, **kwargs):
super(Identifier, self).__init__(**kwargs)
self.name = name
class Symbol(Identifier):
def __init__(self, name, **kwargs):
super(Symbol, self).__init__(name, **kwargs)
class Comment(Entry):
def __init__(self, content=None, **kwargs):
super(Comment, self).__init__(**kwargs)
self.content = content
class Section(Entry):
def __init__(self, name, comment=None, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.comment = comment
class Function(Identifier):
def __init__(self, name, **kwargs):
super(Function, self).__init__(name, **kwargs)
class Junk(Entry):
def __init__(self, content=None, **kwargs):
super(Junk, self).__init__(**kwargs)
self.content = content
class Span(BaseNode):
def __init__(self, start, end, **kwargs):
super(Span, self).__init__(**kwargs)
self.start = start
self.end = end
class Annotation(SyntaxNode):
def __init__(self, code, args=None, message=None, **kwargs):
super(Annotation, self).__init__(**kwargs)
self.code = code
self.args = args or []
self.message = message

third_party/python/fluent/fluent/syntax/errors.py vendored Normal file
@@ -0,0 +1,43 @@
from __future__ import unicode_literals
class ParseError(Exception):
def __init__(self, code, *args):
self.code = code
self.args = args
self.message = get_error_message(code, args)
def get_error_message(code, args):
if code == 'E0001':
return 'Generic error'
if code == 'E0002':
return 'Expected an entry start'
if code == 'E0003':
return 'Expected token: "{}"'.format(args[0])
if code == 'E0004':
return 'Expected a character from range: "{}"'.format(args[0])
if code == 'E0005':
msg = 'Expected entry "{}" to have a value or attributes'
return msg.format(args[0])
if code == 'E0006':
return 'Expected field: "{}"'.format(args[0])
if code == 'E0007':
return 'Keyword cannot end with a whitespace'
if code == 'E0008':
return 'Callee has to be a simple identifier'
if code == 'E0009':
return 'Key has to be a simple identifier'
if code == 'E0010':
return 'Expected one of the variants to be marked as default (*)'
if code == 'E0011':
return 'Expected at least one variant after "->"'
if code == 'E0012':
return 'Tags cannot be added to messages with attributes'
if code == 'E0013':
return 'Expected variant key'
if code == 'E0014':
return 'Expected literal'
if code == 'E0015':
return 'Only one variant can be marked as default (*)'
return code
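# In practice these codes surface as Annotations on Junk entries rather
# than as raised exceptions; a quick check against a malformed message,
# assuming the 0.4.2 package.
from fluent.syntax import parse

res = parse('2bad = not an identifier\n')
junk = res.body[0]
annot = junk.annotations[0]
print(annot.code + ': ' + annot.message)  # E0002: Expected an entry start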

third_party/python/fluent/fluent/syntax/ftlstream.py vendored Normal file
@@ -0,0 +1,208 @@
from __future__ import unicode_literals
from .stream import ParserStream
from .errors import ParseError
INLINE_WS = (' ', '\t')
class FTLParserStream(ParserStream):
def peek_inline_ws(self):
ch = self.current_peek()
while ch:
if ch not in INLINE_WS:
break
ch = self.peek()
def skip_blank_lines(self):
while True:
self.peek_inline_ws()
if self.current_peek_is('\n'):
self.skip_to_peek()
self.next()
else:
self.reset_peek()
break
def skip_inline_ws(self):
while self.ch:
if self.ch not in INLINE_WS:
break
self.next()
def expect_char(self, ch):
if self.ch == ch:
self.next()
return True
if ch == '\n':
# Unicode Character 'SYMBOL FOR NEWLINE' (U+2424)
raise ParseError('E0003', '\u2424')
raise ParseError('E0003', ch)
def take_char_if(self, ch):
if self.ch == ch:
self.next()
return True
return False
def take_char(self, f):
ch = self.ch
if ch is not None and f(ch):
self.next()
return ch
return None
def is_id_start(self):
if self.ch is None:
return False
cc = ord(self.ch)
return (cc >= 97 and cc <= 122) or \
(cc >= 65 and cc <= 90) or \
cc == 95
def is_number_start(self):
cc = ord(self.ch)
return (cc >= 48 and cc <= 57) or cc == 45
def is_peek_next_line_variant_start(self):
if not self.current_peek_is('\n'):
return False
self.peek()
ptr = self.get_peek_index()
self.peek_inline_ws()
if (self.get_peek_index() - ptr == 0):
self.reset_peek()
return False
if self.current_peek_is('*'):
self.peek()
if self.current_peek_is('[') and not self.peek_char_is('['):
self.reset_peek()
return True
self.reset_peek()
return False
def is_peek_next_line_attribute_start(self):
if not self.current_peek_is('\n'):
return False
self.peek()
ptr = self.get_peek_index()
self.peek_inline_ws()
if (self.get_peek_index() - ptr == 0):
self.reset_peek()
return False
if self.current_peek_is('.'):
self.reset_peek()
return True
self.reset_peek()
return False
def is_peek_next_line_pattern(self):
if not self.current_peek_is('\n'):
return False
self.peek()
ptr = self.get_peek_index()
self.peek_inline_ws()
if (self.get_peek_index() - ptr == 0):
self.reset_peek()
return False
if (self.current_peek_is('}') or
self.current_peek_is('.') or
self.current_peek_is('#') or
self.current_peek_is('[') or
self.current_peek_is('*')):
self.reset_peek()
return False
self.reset_peek()
return True
def is_peek_next_line_tag_start(self):
if not self.current_peek_is('\n'):
return False
self.peek()
ptr = self.get_peek_index()
self.peek_inline_ws()
if (self.get_peek_index() - ptr == 0):
self.reset_peek()
return False
if self.current_peek_is('#'):
self.reset_peek()
return True
self.reset_peek()
return False
def skip_to_next_entry_start(self):
while self.ch:
if self.current_is('\n') and not self.peek_char_is('\n'):
self.next()
if self.ch is None or self.is_id_start() or \
(self.current_is('/') and self.peek_char_is('/')) or \
(self.current_is('[') and self.peek_char_is('[')):
break
self.next()
def take_id_start(self):
if self.is_id_start():
ret = self.ch
self.next()
return ret
raise ParseError('E0004', 'a-zA-Z_')
def take_id_char(self):
def closure(ch):
cc = ord(ch)
return ((cc >= 97 and cc <= 122) or
(cc >= 65 and cc <= 90) or
(cc >= 48 and cc <= 57) or
cc == 95 or cc == 45)
return self.take_char(closure)
def take_symb_char(self):
def closure(ch):
if ch is None:
return False
cc = ord(ch)
return (cc >= 97 and cc <= 122) or \
(cc >= 65 and cc <= 90) or \
(cc >= 48 and cc <= 57) or \
cc == 95 or cc == 45 or cc == 32
return self.take_char(closure)
def take_digit(self):
def closure(ch):
cc = ord(ch)
return (cc >= 48 and cc <= 57)
return self.take_char(closure)

third_party/python/fluent/fluent/syntax/parser.py vendored Normal file
@@ -0,0 +1,579 @@
from __future__ import unicode_literals
import re
from .ftlstream import FTLParserStream
from . import ast
from .errors import ParseError
def with_span(fn):
def decorated(self, ps, *args):
if not self.with_spans:
return fn(self, ps, *args)
start = ps.get_index()
node = fn(self, ps, *args)
# Don't re-add the span if the node already has it. This may happen
# when one decorated function calls another decorated function.
if node.span is not None:
return node
# Spans of Messages and Sections should include the attached Comment.
if isinstance(node, ast.Message) or isinstance(node, ast.Section):
if node.comment is not None:
start = node.comment.span.start
end = ps.get_index()
node.add_span(start, end)
return node
return decorated
class FluentParser(object):
def __init__(self, with_spans=True):
self.with_spans = with_spans
def parse(self, source):
comment = None
ps = FTLParserStream(source)
ps.skip_blank_lines()
entries = []
while ps.current():
entry = self.get_entry_or_junk(ps)
if isinstance(entry, ast.Comment) and len(entries) == 0:
comment = entry
else:
entries.append(entry)
ps.skip_blank_lines()
res = ast.Resource(entries, comment)
if self.with_spans:
res.add_span(0, ps.get_index())
return res
def parse_entry(self, source):
ps = FTLParserStream(source)
ps.skip_blank_lines()
return self.get_entry_or_junk(ps)
def get_entry_or_junk(self, ps):
entry_start_pos = ps.get_index()
try:
return self.get_entry(ps)
except ParseError as err:
error_index = ps.get_index()
ps.skip_to_next_entry_start()
next_entry_start = ps.get_index()
# Create a Junk instance
slice = ps.get_slice(entry_start_pos, next_entry_start)
junk = ast.Junk(slice)
if self.with_spans:
junk.add_span(entry_start_pos, next_entry_start)
annot = ast.Annotation(err.code, err.args, err.message)
annot.add_span(error_index, error_index)
junk.add_annotation(annot)
return junk
def get_entry(self, ps):
comment = None
if ps.current_is('/'):
comment = self.get_comment(ps)
if ps.current_is('['):
return self.get_section(ps, comment)
if ps.is_id_start():
return self.get_message(ps, comment)
if comment:
return comment
raise ParseError('E0002')
@with_span
def get_comment(self, ps):
ps.expect_char('/')
ps.expect_char('/')
ps.take_char_if(' ')
content = ''
def until_eol(x):
return x != '\n'
while True:
ch = ps.take_char(until_eol)
while ch:
content += ch
ch = ps.take_char(until_eol)
ps.next()
if ps.current_is('/'):
content += '\n'
ps.next()
ps.expect_char('/')
ps.take_char_if(' ')
else:
break
return ast.Comment(content)
@with_span
def get_section(self, ps, comment):
ps.expect_char('[')
ps.expect_char('[')
ps.skip_inline_ws()
symb = self.get_symbol(ps)
ps.skip_inline_ws()
ps.expect_char(']')
ps.expect_char(']')
ps.skip_inline_ws()
ps.expect_char('\n')
return ast.Section(symb, comment)
@with_span
def get_message(self, ps, comment):
id = self.get_identifier(ps)
ps.skip_inline_ws()
pattern = None
attrs = None
tags = None
if ps.current_is('='):
ps.next()
ps.skip_inline_ws()
pattern = self.get_pattern(ps)
if ps.is_peek_next_line_attribute_start():
attrs = self.get_attributes(ps)
if ps.is_peek_next_line_tag_start():
if attrs is not None:
raise ParseError('E0012')
tags = self.get_tags(ps)
if pattern is None and attrs is None:
raise ParseError('E0005', id.name)
return ast.Message(id, pattern, attrs, tags, comment)
@with_span
def get_attribute(self, ps):
ps.expect_char('.')
key = self.get_identifier(ps)
ps.skip_inline_ws()
ps.expect_char('=')
ps.skip_inline_ws()
value = self.get_pattern(ps)
if value is None:
raise ParseError('E0006', 'value')
return ast.Attribute(key, value)
def get_attributes(self, ps):
attrs = []
while True:
ps.expect_char('\n')
ps.skip_inline_ws()
attr = self.get_attribute(ps)
attrs.append(attr)
if not ps.is_peek_next_line_attribute_start():
break
return attrs
@with_span
def get_tag(self, ps):
ps.expect_char('#')
symb = self.get_symbol(ps)
return ast.Tag(symb)
def get_tags(self, ps):
tags = []
while True:
ps.expect_char('\n')
ps.skip_inline_ws()
tag = self.get_tag(ps)
tags.append(tag)
if not ps.is_peek_next_line_tag_start():
break
return tags
@with_span
def get_identifier(self, ps):
name = ''
name += ps.take_id_start()
ch = ps.take_id_char()
while ch:
name += ch
ch = ps.take_id_char()
return ast.Identifier(name)
def get_variant_key(self, ps):
ch = ps.current()
if ch is None:
raise ParseError('E0013')
if ps.is_number_start():
return self.get_number(ps)
return self.get_symbol(ps)
@with_span
def get_variant(self, ps, has_default):
default_index = False
if ps.current_is('*'):
if has_default:
raise ParseError('E0015')
ps.next()
default_index = True
ps.expect_char('[')
key = self.get_variant_key(ps)
ps.expect_char(']')
ps.skip_inline_ws()
value = self.get_pattern(ps)
if value is None:
raise ParseError('E0006', 'value')
return ast.Variant(key, value, default_index)
def get_variants(self, ps):
variants = []
has_default = False
while True:
ps.expect_char('\n')
ps.skip_inline_ws()
variant = self.get_variant(ps, has_default)
if variant.default:
has_default = True
variants.append(variant)
if not ps.is_peek_next_line_variant_start():
break
if not has_default:
raise ParseError('E0010')
return variants
@with_span
def get_symbol(self, ps):
name = ''
name += ps.take_id_start()
while True:
ch = ps.take_symb_char()
if ch:
name += ch
else:
break
return ast.Symbol(name.rstrip())
def get_digits(self, ps):
num = ''
ch = ps.take_digit()
while ch:
num += ch
ch = ps.take_digit()
if len(num) == 0:
raise ParseError('E0004', '0-9')
return num
@with_span
def get_number(self, ps):
num = ''
if ps.current_is('-'):
num += '-'
ps.next()
num += self.get_digits(ps)
if ps.current_is('.'):
num += '.'
ps.next()
num += self.get_digits(ps)
return ast.NumberExpression(num)
@with_span
def get_pattern(self, ps):
elements = []
ps.skip_inline_ws()
# Special-case: trim leading whitespace and newlines.
if ps.is_peek_next_line_pattern():
ps.skip_blank_lines()
ps.skip_inline_ws()
while ps.current():
ch = ps.current()
# The end condition for get_pattern's while loop is a newline
# which is not followed by a valid pattern continuation.
if ch == '\n' and not ps.is_peek_next_line_pattern():
break
if ch == '{':
element = self.get_placeable(ps)
else:
element = self.get_text_element(ps)
elements.append(element)
return ast.Pattern(elements)
@with_span
def get_text_element(self, ps):
buf = ''
while ps.current():
ch = ps.current()
if ch == '{':
return ast.TextElement(buf)
if ch == '\n':
if not ps.is_peek_next_line_pattern():
return ast.TextElement(buf)
ps.next()
ps.skip_inline_ws()
# Add the new line to the buffer
buf += ch
continue
if ch == '\\':
ch2 = ps.next()
if ch2 == '{' or ch2 == '"':
buf += ch2
else:
buf += ch + ch2
else:
buf += ch
ps.next()
return ast.TextElement(buf)
@with_span
def get_placeable(self, ps):
ps.expect_char('{')
expression = self.get_expression(ps)
ps.expect_char('}')
return ast.Placeable(expression)
@with_span
def get_expression(self, ps):
if ps.is_peek_next_line_variant_start():
variants = self.get_variants(ps)
ps.expect_char('\n')
ps.expect_char(' ')
ps.skip_inline_ws()
return ast.SelectExpression(None, variants)
ps.skip_inline_ws()
selector = self.get_selector_expression(ps)
ps.skip_inline_ws()
if ps.current_is('-'):
ps.peek()
if not ps.current_peek_is('>'):
ps.reset_peek()
else:
ps.next()
ps.next()
ps.skip_inline_ws()
variants = self.get_variants(ps)
if len(variants) == 0:
raise ParseError('E0011')
ps.expect_char('\n')
ps.expect_char(' ')
ps.skip_inline_ws()
return ast.SelectExpression(selector, variants)
return selector
@with_span
def get_selector_expression(self, ps):
literal = self.get_literal(ps)
if not isinstance(literal, ast.MessageReference):
return literal
ch = ps.current()
if (ch == '.'):
ps.next()
attr = self.get_identifier(ps)
return ast.AttributeExpression(literal.id, attr)
if (ch == '['):
ps.next()
key = self.get_variant_key(ps)
ps.expect_char(']')
return ast.VariantExpression(literal.id, key)
if (ch == '('):
ps.next()
args = self.get_call_args(ps)
ps.expect_char(')')
if not re.match('^[A-Z_-]+$', literal.id.name):
raise ParseError('E0008')
return ast.CallExpression(
ast.Function(literal.id.name),
args
)
return literal
@with_span
def get_call_arg(self, ps):
exp = self.get_selector_expression(ps)
ps.skip_inline_ws()
if not ps.current_is(':'):
return exp
if not isinstance(exp, ast.MessageReference):
raise ParseError('E0009')
ps.next()
ps.skip_inline_ws()
val = self.get_arg_val(ps)
return ast.NamedArgument(exp.id, val)
def get_call_args(self, ps):
args = []
ps.skip_inline_ws()
while True:
if ps.current_is(')'):
break
arg = self.get_call_arg(ps)
args.append(arg)
ps.skip_inline_ws()
if ps.current_is(','):
ps.next()
ps.skip_inline_ws()
continue
else:
break
return args
def get_arg_val(self, ps):
if ps.is_number_start():
return self.get_number(ps)
elif ps.current_is('"'):
return self.get_string(ps)
raise ParseError('E0006', 'value')
@with_span
def get_string(self, ps):
val = ''
ps.expect_char('"')
ch = ps.take_char(lambda x: x != '"')
while ch:
val += ch
ch = ps.take_char(lambda x: x != '"')
ps.next()
return ast.StringExpression(val)
@with_span
def get_literal(self, ps):
ch = ps.current()
if ch is None:
raise ParseError('E0014')
if ps.is_number_start():
return self.get_number(ps)
elif ch == '"':
return self.get_string(ps)
elif ch == '$':
ps.next()
name = self.get_identifier(ps)
return ast.ExternalArgument(name)
name = self.get_identifier(ps)
return ast.MessageReference(name)

third_party/python/fluent/fluent/syntax/serializer.py vendored Normal file
@@ -0,0 +1,273 @@
from __future__ import unicode_literals
from . import ast
def indent(content):
return " ".join(
content.splitlines(True)
)
def contain_new_line(elems):
return bool([
elem for elem in elems
if isinstance(elem, ast.TextElement) and "\n" in elem.value
])
class FluentSerializer(object):
def __init__(self, with_junk=False):
self.with_junk = with_junk
def serialize(self, resource):
parts = []
if resource.comment:
parts.append(
"{}\n\n".format(
serialize_comment(resource.comment)
)
)
for entry in resource.body:
if not isinstance(entry, ast.Junk) or self.with_junk:
parts.append(self.serialize_entry(entry))
return "".join(parts)
def serialize_entry(self, entry):
if isinstance(entry, ast.Message):
return serialize_message(entry)
if isinstance(entry, ast.Section):
return serialize_section(entry)
if isinstance(entry, ast.Comment):
return "\n{}\n\n".format(serialize_comment(entry))
if isinstance(entry, ast.Junk):
return serialize_junk(entry)
raise Exception('Unknown entry type: {}'.format(entry.type))
def serialize_comment(comment):
return "".join([
"{}{}".format("// ", line)
for line in comment.content.splitlines(True)
])
def serialize_section(section):
if section.comment:
return "\n\n{}\n[[ {} ]]\n\n".format(
serialize_comment(section.comment),
serialize_symbol(section.name)
)
else:
return "\n\n[[ {} ]]\n\n".format(
serialize_symbol(section.name)
)
def serialize_junk(junk):
return junk.content
def serialize_message(message):
parts = []
if message.comment:
parts.append(serialize_comment(message.comment))
parts.append("\n")
parts.append(serialize_identifier(message.id))
if message.value:
parts.append(" =")
parts.append(serialize_value(message.value))
if message.tags:
for tag in message.tags:
parts.append(serialize_tag(tag))
if message.attributes:
for attribute in message.attributes:
parts.append(serialize_attribute(attribute))
parts.append("\n")
return ''.join(parts)
def serialize_tag(tag):
return "\n #{}".format(
serialize_symbol(tag.name),
)
def serialize_attribute(attribute):
return "\n .{} ={}".format(
serialize_identifier(attribute.id),
indent(serialize_value(attribute.value))
)
def serialize_value(pattern):
multi = contain_new_line(pattern.elements)
schema = "\n {}" if multi else " {}"
content = serialize_pattern(pattern)
return schema.format(indent(content))
def serialize_pattern(pattern):
return "".join([
serialize_element(elem)
for elem in pattern.elements
])
def serialize_element(element):
if isinstance(element, ast.TextElement):
return serialize_text_element(element)
if isinstance(element, ast.Placeable):
return serialize_placeable(element)
raise Exception('Unknown element type: {}'.format(element.type))
def serialize_text_element(text):
return text.value
def serialize_placeable(placeable):
expr = placeable.expression
if isinstance(expr, ast.Placeable):
return "{{{}}}".format(
serialize_placeable(expr))
if isinstance(expr, ast.SelectExpression):
return "{{{}}}".format(
serialize_select_expression(expr))
if isinstance(expr, ast.Expression):
return "{{ {} }}".format(
serialize_expression(expr))
def serialize_expression(expression):
if isinstance(expression, ast.StringExpression):
return serialize_string_expression(expression)
if isinstance(expression, ast.NumberExpression):
return serialize_number_expression(expression)
if isinstance(expression, ast.MessageReference):
return serialize_message_reference(expression)
if isinstance(expression, ast.ExternalArgument):
return serialize_external_argument(expression)
if isinstance(expression, ast.AttributeExpression):
return serialize_attribute_expression(expression)
if isinstance(expression, ast.VariantExpression):
return serialize_variant_expression(expression)
if isinstance(expression, ast.CallExpression):
return serialize_call_expression(expression)
raise Exception('Unknown expression type: {}'.format(expression.type))
def serialize_string_expression(expr):
return "\"{}\"".format(expr.value)
def serialize_number_expression(expr):
return expr.value
def serialize_message_reference(expr):
return serialize_identifier(expr.id)
def serialize_external_argument(expr):
return "${}".format(serialize_identifier(expr.id))
def serialize_select_expression(expr):
parts = []
if expr.expression:
selector = " {} ->".format(
serialize_expression(expr.expression)
)
parts.append(selector)
for variant in expr.variants:
parts.append(serialize_variant(variant))
parts.append("\n")
return "".join(parts)
def serialize_variant(variant):
return "\n{}[{}]{}".format(
" *" if variant.default else " ",
serialize_variant_key(variant.key),
indent(serialize_value(variant.value))
)
def serialize_attribute_expression(expr):
return "{}.{}".format(
serialize_identifier(expr.id),
serialize_identifier(expr.name),
)
def serialize_variant_expression(expr):
return "{}[{}]".format(
serialize_identifier(expr.id),
serialize_variant_key(expr.key),
)
def serialize_call_expression(expr):
return "{}({})".format(
serialize_function(expr.callee),
", ".join([
serialize_call_argument(arg)
for arg in expr.args
])
)
def serialize_call_argument(arg):
if isinstance(arg, ast.Expression):
return serialize_expression(arg)
if isinstance(arg, ast.NamedArgument):
return serialize_named_argument(arg)
def serialize_named_argument(arg):
return "{}: {}".format(
serialize_identifier(arg.name),
serialize_argument_value(arg.val)
)
def serialize_argument_value(argval):
if isinstance(argval, ast.StringExpression):
return serialize_string_expression(argval)
if isinstance(argval, ast.NumberExpression):
return serialize_number_expression(argval)
raise Exception('Unknown argument type: {}'.format(argval.type))
def serialize_identifier(identifier):
return identifier.name
def serialize_symbol(symbol):
return symbol.name
def serialize_variant_key(key):
if isinstance(key, ast.Symbol):
return serialize_symbol(key)
if isinstance(key, ast.NumberExpression):
return serialize_number_expression(key)
raise Exception('Unknown variant key type: {}'.format(key.type))
def serialize_function(function):
return function.name
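# The indentation rules above are easiest to see on a message with an
# attribute; this round trip exercises serialize_attribute and
# serialize_value, assuming the 0.4.2 package (note the four-space
# indent before the attribute).
from fluent.syntax import parse, serialize

source = 'key = value\n    .title = tooltip\n'
print(serialize(parse(source)) == source)  # True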

third_party/python/fluent/fluent/syntax/stream.py vendored Normal file
@@ -0,0 +1,120 @@
from __future__ import unicode_literals
class StringIter():
def __init__(self, source):
self.source = source
self.len = len(source)
self.i = 0
def next(self):
if self.i < self.len:
ret = self.source[self.i]
self.i += 1
return ret
return None
def get_slice(self, start, end):
return self.source[start:end]
class ParserStream():
def __init__(self, string):
self.iter = StringIter(string)
self.buf = []
self.peek_index = 0
self.index = 0
self.ch = None
self.iter_end = False
self.peek_end = False
self.ch = self.iter.next()
def next(self):
if self.iter_end:
return None
if len(self.buf) == 0:
self.ch = self.iter.next()
else:
self.ch = self.buf.pop(0)
self.index += 1
if self.ch is None:
self.iter_end = True
self.peek_end = True
self.peek_index = self.index
return self.ch
def current(self):
return self.ch
def current_is(self, ch):
return self.ch == ch
def current_peek(self):
if self.peek_end:
return None
diff = self.peek_index - self.index
if diff == 0:
return self.ch
return self.buf[diff - 1]
def current_peek_is(self, ch):
return self.current_peek() == ch
def peek(self):
if self.peek_end:
return None
self.peek_index += 1
diff = self.peek_index - self.index
if diff > len(self.buf):
ch = self.iter.next()
if ch is not None:
self.buf.append(ch)
else:
self.peek_end = True
return None
return self.buf[diff - 1]
def get_index(self):
return self.index
def get_peek_index(self):
return self.peek_index
def peek_char_is(self, ch):
if self.peek_end:
return False
ret = self.peek()
self.peek_index -= 1
return ret == ch
def reset_peek(self):
self.peek_index = self.index
self.peek_end = self.iter_end
def skip_to_peek(self):
diff = self.peek_index - self.index
for i in range(0, diff):
self.ch = self.buf.pop(0)
self.index = self.peek_index
def get_slice(self, start, end):
return self.iter.get_slice(start, end)
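# The parser's lookahead depends on the peek cursor advancing
# independently of the main index until skip_to_peek or reset_peek is
# called; a small demonstration, assuming the 0.4.2 package.
from fluent.syntax.stream import ParserStream

ps = ParserStream('ab')
print(ps.current() + ps.peek())  # ab
ps.reset_peek()
print(ps.current_peek())         # a
ps.peek()
ps.skip_to_peek()
print(ps.current())              # b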

third_party/python/fluent/fluent/util.py vendored Normal file
@@ -0,0 +1,42 @@
# coding=utf8
import textwrap
import fluent.syntax.ast as FTL
def ftl(code):
"""Nicer indentation for FTL code.
The code returned by this function is meant to be compared against the
output of the FTL Serializer. The input code will end with a newline to
match the output of the serializer.
"""
# The code might be triple-quoted.
code = code.lstrip('\n')
return textwrap.dedent(code)
def fold(fun, node, init):
"""Reduce `node` to a single value using `fun`.
Apply `fun` against an accumulator and each subnode of `node` (in postorder
traversal) to reduce it to a single value.
"""
def fold_(vals, acc):
if not vals:
return acc
head = list(vals)[0]
tail = list(vals)[1:]
if isinstance(head, FTL.BaseNode):
acc = fold(fun, head, acc)
if isinstance(head, list):
acc = fold_(head, acc)
return fold_(tail, fun(acc, head))
return fold_(vars(node).values(), init)
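# fold complements BaseNode.traverse for read-only passes; for example,
# counting the nodes of one type (the message is made up, assuming the
# 0.4.2 package).
from fluent.syntax import parse
import fluent.syntax.ast as FTL
from fluent.util import fold

def count_text(acc, node):
    return acc + 1 if isinstance(node, FTL.TextElement) else acc

res = parse('hello = Hello, world!\n')
print(fold(count_text, res, 0))  # 1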

third_party/python/fluent/setup.cfg vendored Normal file
@@ -0,0 +1,4 @@
[egg_info]
tag_build =
tag_date = 0

third_party/python/fluent/setup.py vendored Normal file
@@ -0,0 +1,24 @@
#!/usr/bin/env python
from setuptools import setup
setup(name='fluent',
version='0.4.2',
description='Localization library for expressive translations.',
author='Mozilla',
author_email='l10n-drivers@mozilla.org',
license='APL 2',
url='https://github.com/projectfluent/python-fluent',
keywords=['fluent', 'localization', 'l10n'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
packages=['fluent', 'fluent.syntax', 'fluent.migrate'],
package_data={
'fluent.migrate': ['cldr_data/*']
}
)

@@ -205,7 +205,7 @@ ifdef NIGHTLY_BUILD
fi
endif
$(RM) -rf $(REAL_LOCALE_MERGEDIR)
$(MOZILLA_DIR)/mach compare-locales --l10n-base $(L10NBASEDIR) --merge-dir $(REAL_LOCALE_MERGEDIR) $*
$(MOZILLA_DIR)/mach compare-locales --merge $(BASE_MERGE) $(srcdir)/l10n.toml $(L10NBASEDIR) $*
langpack-%: LANGPACK_FILE=$(ABS_DIST)/$(PKG_LANGPACK_PATH)$(PKG_LANGPACK_BASENAME).xpi
langpack-%: AB_CD=$*

@@ -10,72 +10,46 @@ from mach.decorators import (
Command,
)
from mozbuild.base import (
MachCommandBase,
from mach.base import (
FailedCommandError,
)
import mozpack.path as mozpath
MERGE_HELP = '''Directory to merge to. Will be removed before running
the comparison. Default: $(OBJDIR)/($MOZ_BUILD_APP)/locales/merge-$(AB_CD)
'''.lstrip()
@CommandProvider
class CompareLocales(MachCommandBase):
class CompareLocales(object):
"""Run compare-locales."""
@Command('compare-locales', category='testing',
description='Run source checks on a localization.')
@CommandArgument('--l10n-ini',
help='l10n.ini describing the app. ' +
'Default: $(MOZ_BUILD_APP)/locales/l10n.ini')
@CommandArgument('--l10n-base',
help='Directory with the localizations. ' +
'Default: $(L10NBASEDIR)')
@CommandArgument('--merge-dir',
help=MERGE_HELP)
@CommandArgument('locales', nargs='+', metavar='ab_CD',
help='Locale codes to compare')
def compare(self, l10n_ini=None, l10n_base=None, merge_dir=None,
locales=None):
from compare_locales.paths import EnumerateApp
from compare_locales.compare import compareApp
@CommandArgument('config_paths', metavar='l10n.toml', nargs='+',
help='TOML or INI file for the project')
@CommandArgument('l10n_base_dir', metavar='l10n-base-dir',
help='Parent directory of localizations')
@CommandArgument('locales', nargs='*', metavar='locale-code',
help='Locale code and top-level directory of '
'each localization')
@CommandArgument('-m', '--merge',
help='''Use this directory to stage merged files''')
@CommandArgument('-D', action='append', metavar='var=value',
default=[], dest='defines',
help='Overwrite variables in TOML files')
@CommandArgument('--unified', action="store_true",
help="Show output for all projects unified")
@CommandArgument('--full', action="store_true",
help="Compare projects that are disabled")
def compare(self, **kwargs):
from compare_locales.commands import CompareLocales
# check if we're configured and use defaults from there
# otherwise, error early
try:
self.substs # explicitly check
if not l10n_ini:
l10n_ini = mozpath.join(
self.topsrcdir,
self.substs['MOZ_BUILD_APP'],
'locales', 'l10n.ini'
)
if not l10n_base:
l10n_base = mozpath.join(
self.topsrcdir,
self.substs['L10NBASEDIR']
)
except Exception:
if not l10n_ini or not l10n_base:
print('Specify --l10n-ini and --l10n-base or run configure.')
return 1
class ErrorHelper(object):
'''Dummy ArgumentParser to marshall compare-locales
commandline errors to mach exceptions.
'''
def error(self, msg):
raise FailedCommandError(msg)
if not merge_dir:
try:
# self.substs is raising an Exception if we're not configured
# don't merge if we're not
merge_dir = mozpath.join(
self.topobjdir,
self.substs['MOZ_BUILD_APP'],
'locales', 'merge-dir-{ab_CD}'
)
except Exception:
pass
def exit(self, message=None, status=0):
raise FailedCommandError(message, exit_code=status)
app = EnumerateApp(l10n_ini, l10n_base, locales)
observer = compareApp(app, merge_stage=merge_dir,
clobber=True)
print(observer.serialize())
cmd = CompareLocales()
cmd.parser = ErrorHelper()
return cmd.handle(**kwargs)