Bug 1654112 - Add grit dep for building webrtc on android; r=mjf

Differential Revision: https://phabricator.services.mozilla.com/D114027
This commit is contained in:
Nico Grunbaum 2021-04-30 21:51:54 +00:00
Parent e1a32cff5b
Commit c13c1bee91
226 changed files: 33441 additions and 0 deletions

1
third_party/libwebrtc/tools/grit/.gitignore поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
*.pyc

48
third_party/libwebrtc/tools/grit/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,48 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This target creates a stamp file that depends on all the sources in the grit
# directory. By depending on this, a target can force itself to be rebuilt if
# grit itself changes.

import("//build/config/sanitizers/sanitizers.gni")

action("grit_sources") {
  # The script emits a depfile listing every grit source, so the stamp is
  # regenerated whenever any grit file changes, not just grit.py.
  depfile = "$target_out_dir/grit_sources.d"
  script = "stamp_grit_sources.py"

  inputs = [ "grit.py" ]

  # Note that we can't call this "grit_sources.stamp" because that file is
  # implicitly created by GN for script actions.
  outputs = [ "$target_out_dir/grit_sources.script.stamp" ]

  # Argument order expected by stamp_grit_sources.py:
  # <grit dir> <stamp file> <depfile>.
  args = [
    rebase_path("//tools/grit", root_build_dir),
    rebase_path(outputs[0], root_build_dir),
    rebase_path(depfile, root_build_dir),
  ]
}
# Data dependencies needed to run grit's Python unit tests on a swarming bot:
# the test-runner helpers, the grit sources themselves, and the typ test
# framework from catapult.
group("grit_python_unittests") {
  testonly = true

  data = [
    "//testing/scripts/common.py",
    "//testing/scripts/run_isolated_script_test.py",
    "//testing/xvfb.py",
    "//tools/grit/",
    "//third_party/catapult/third_party/typ/",
  ]
}
# See https://crbug.com/983200
# Wraps the host-toolchain brotli binary in a bundle on mac+asan builds so it
# can be used by grit during the build.
if (is_mac && is_asan) {
  create_bundle("brotli_mac_asan_workaround") {
    bundle_root_dir = "$target_out_dir/$target_name"
    bundle_executable_dir = bundle_root_dir

    public_deps = [ "//third_party/brotli:brotli($host_toolchain)" ]
  }
}

3
third_party/libwebrtc/tools/grit/MANIFEST.in поставляемый Normal file
Просмотреть файл

@ -0,0 +1,3 @@
exclude grit/test_suite_all.py
exclude grit/tool/test.py
global-exclude *_unittest.py

8
third_party/libwebrtc/tools/grit/OWNERS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
agrieve@chromium.org
flackr@chromium.org
thakis@chromium.org
thestig@chromium.org
# Admin policy related grit tools.
per-file *policy*=file://components/policy/tools/OWNERS
per-file *admin_template*=file://components/policy/tools/OWNERS

22
third_party/libwebrtc/tools/grit/PRESUBMIT.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,22 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""grit unittests presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def RunUnittests(input_api, output_api):
  """Run the grit unit test suite via the canned presubmit check.

  Args:
    input_api: presubmit InputApi object.
    output_api: presubmit OutputApi object.

  Returns:
    The list of presubmit results from the canned RunUnitTests check.
  """
  suite = input_api.os_path.join('grit', 'test_suite_all.py')
  return input_api.canned_checks.RunUnitTests(input_api, output_api, [suite])


def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook: run the unit tests when a change is uploaded."""
  return RunUnittests(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook: run the unit tests when a change is committed."""
  return RunUnittests(input_api, output_api)

19
third_party/libwebrtc/tools/grit/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,19 @@
# GRIT (Google Resource and Internationalization Tool)
This is a tool for projects to manage resources and simplify the localization
workflow.
See the user guide for more details on using this project:
https://dev.chromium.org/developers/tools-we-use-in-chromium/grit/grit-users-guide
## History
This code previously lived at
https://code.google.com/p/grit-i18n/source/checkout, which still contains the
project's history. https://chromium.googlesource.com/external/grit-i18n/ is
a git mirror of the SVN repository that's identical except for the last two
commits. The project is now developed in the Chromium project directly.
There is a read-only mirror of just this directory at
https://chromium.googlesource.com/chromium/src/tools/grit/ if you don't want to
check out all of Chromium.

31
third_party/libwebrtc/tools/grit/grit.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Bootstrapping for GRIT: thin wrapper that dispatches to grit.grit_runner.'''

from __future__ import print_function

import os
import sys

import grit.grit_runner

# Make the sibling "diagnosis" directory (next to tools/grit) importable so
# the optional crbug_1001171 debugging helper below can be found.
sys.path.append(
    os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'diagnosis'))
try:
  import crbug_1001171
except ImportError:
  # The helper is optional; run without extra diagnostics when absent.
  crbug_1001171 = None


if __name__ == '__main__':
  if crbug_1001171:
    # Dump extra state if a LookupError escapes, to help diagnose
    # crbug.com/1001171; otherwise behaves exactly like the else branch.
    with crbug_1001171.DumpStateOnLookupError():
      sys.exit(grit.grit_runner.Main(sys.argv[1:]))
  else:
    sys.exit(grit.grit_runner.Main(sys.argv[1:]))

19
third_party/libwebrtc/tools/grit/grit/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,19 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Package 'grit': makes the vendored third_party dependencies importable.'''

from __future__ import print_function

import os
import sys

# Absolute path of this package directory (.../tools/grit/grit).
_CUR_DIR = os.path.abspath(os.path.dirname(__file__))
# Parent directory (.../tools/grit), which contains third_party/.
_GRIT_DIR = os.path.dirname(_CUR_DIR)
# Vendored dependencies shipped alongside grit.
_THIRD_PARTY_DIR = os.path.join(_GRIT_DIR, 'third_party')

if _THIRD_PARTY_DIR not in sys.path:
  # Prepend so the vendored copies take precedence over any system-installed
  # versions of the same packages.
  sys.path.insert(0, _THIRD_PARTY_DIR)

491
third_party/libwebrtc/tools/grit/grit/clique.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,491 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Collections of messages and their translations, called cliques. Also
collections of cliques (uber-cliques).
'''
from __future__ import print_function
import re
import six
from grit import constants
from grit import exception
from grit import lazy_re
from grit import pseudo
from grit import pseudo_rtl
from grit import tclib
class UberClique(object):
  '''A factory (NOT a singleton factory) for making cliques.  It has several
  methods for working with the cliques created using the factory.
  '''

  def __init__(self):
    # A map from message ID to list of cliques whose source messages have
    # that ID. This will contain all cliques created using this factory.
    # Different messages can have the same ID because they have the
    # same translateable portion and placeholder names, but occur in different
    # places in the resource tree.
    #
    # Each list of cliques is kept sorted by description, to achieve
    # stable results from the BestClique method, see below.
    self.cliques_ = {}

    # A map of clique IDs to list of languages to indicate translations where we
    # fell back to English.
    self.fallback_translations_ = {}

    # A map of clique IDs to list of languages to indicate missing translations.
    self.missing_translations_ = {}

  def _AddMissingTranslation(self, lang, clique, is_error):
    '''Records that 'clique' lacks a translation for 'lang'.

    Recorded as a hard missing translation when is_error is True, and as an
    English fallback otherwise.
    '''
    tl = self.fallback_translations_
    if is_error:
      tl = self.missing_translations_
    id = clique.GetId()
    if id not in tl:
      tl[id] = {}
    if lang not in tl[id]:
      tl[id][lang] = 1

  def HasMissingTranslations(self):
    '''Returns True if any hard missing translations have been recorded.'''
    return len(self.missing_translations_) > 0

  def MissingTranslationsReport(self):
    '''Returns a string suitable for printing to report missing
    and fallback translations to the user.
    '''
    def ReportTranslation(clique, langs):
      # One-line summary: message ID, a short excerpt of the text, and the
      # affected languages (at most 6 listed explicitly).
      text = clique.GetMessage().GetPresentableContent()
      # The text 'error' (usually 'Error:' but we are conservative)
      # can trigger some build environments (Visual Studio, we're
      # looking at you) to consider invocation of grit to have failed,
      # so we make sure never to output that word.
      extract = re.sub(r'(?i)error', 'REDACTED', text[0:40])[0:40]
      ellipsis = ''
      if len(text) > 40:
        ellipsis = '...'
      langs_extract = langs[0:6]
      describe_langs = ','.join(langs_extract)
      if len(langs) > 6:
        describe_langs += " and %d more" % (len(langs) - 6)
      return " %s \"%s%s\" %s" % (clique.GetId(), extract, ellipsis,
                                  describe_langs)
    lines = []
    if len(self.fallback_translations_):
      lines.append(
          "WARNING: Fell back to English for the following translations:")
      for (id, langs) in self.fallback_translations_.items():
        lines.append(
            ReportTranslation(self.cliques_[id][0], list(langs.keys())))
    if len(self.missing_translations_):
      lines.append("ERROR: The following translations are MISSING:")
      for (id, langs) in self.missing_translations_.items():
        lines.append(
            ReportTranslation(self.cliques_[id][0], list(langs.keys())))
    return '\n'.join(lines)

  def MakeClique(self, message, translateable=True):
    '''Create a new clique initialized with a message.

    Args:
      message: tclib.Message()
      translateable: True | False
    '''
    clique = MessageClique(self, message, translateable)

    # Enable others to find this clique by its message ID
    if message.GetId() in self.cliques_:
      presentable_text = clique.GetMessage().GetPresentableContent()
      # Messages sharing an auto-generated ID must present identical text;
      # messages with an explicitly assigned ID are exempt from this check.
      if not message.HasAssignedId():
        for c in self.cliques_[message.GetId()]:
          assert c.GetMessage().GetPresentableContent() == presentable_text
      self.cliques_[message.GetId()].append(clique)
      # We need to keep each list of cliques sorted by description, to
      # achieve stable results from the BestClique method, see below.
      self.cliques_[message.GetId()].sort(
          key=lambda c:c.GetMessage().GetDescription())
    else:
      self.cliques_[message.GetId()] = [clique]

    return clique

  def FindCliqueAndAddTranslation(self, translation, language):
    '''Adds the specified translation to the clique with the source message
    it is a translation of.

    Args:
      translation: tclib.Translation()
      language: 'en' | 'fr' ...

    Return:
      True if the source message was found, otherwise false.
    '''
    if translation.GetId() in self.cliques_:
      # Every clique sharing this ID gets the translation.
      for clique in self.cliques_[translation.GetId()]:
        clique.AddTranslation(translation, language)
      return True
    else:
      return False

  def BestClique(self, id):
    '''Returns the "best" clique from a list of cliques.  All the cliques
    must have the same ID.  The "best" clique is chosen in the following
    order of preference:
    - The first clique that has a non-ID-based description.
    - If no such clique found, the first clique with an ID-based description.
    - Otherwise the first clique.

    This method is stable in terms of always returning a clique with
    an identical description (on different runs of GRIT on the same
    data) because self.cliques_ is sorted by description.
    '''
    clique_list = self.cliques_[id]
    clique_with_id = None
    clique_default = None
    for clique in clique_list:
      if not clique_default:
        clique_default = clique

      description = clique.GetMessage().GetDescription()
      if description and len(description) > 0:
        if not description.startswith('ID:'):
          # this is the preferred case so we exit right away
          return clique
        elif not clique_with_id:
          clique_with_id = clique
    if clique_with_id:
      return clique_with_id
    else:
      return clique_default

  def BestCliquePerId(self):
    '''Iterates over the list of all cliques and returns the best clique for
    each ID.  This will be the first clique with a source message that has a
    non-empty description, or an arbitrary clique if none of them has a
    description.
    '''
    for id in self.cliques_:
      yield self.BestClique(id)

  def BestCliqueByOriginalText(self, text, meaning):
    '''Finds the "best" (as in BestClique()) clique that has original text
    'text' and meaning 'meaning'.  Returns None if there is no such clique.

    NOTE(review): despite the name and docstring, this returns the clique's
    source *message* (c.GetMessage()), not the clique itself -- confirm
    against callers before changing.
    '''
    # If needed, this can be optimized by maintaining a map of
    # fingerprints of original text+meaning to cliques.
    for c in self.BestCliquePerId():
      msg = c.GetMessage()
      if msg.GetRealContent() == text and msg.GetMeaning() == meaning:
        return msg
    return None

  def AllMessageIds(self):
    '''Returns a list of all defined message IDs.
    '''
    return list(self.cliques_.keys())

  def AllCliques(self):
    '''Iterates over all cliques.  Note that this can return multiple cliques
    with the same ID.
    '''
    for cliques in self.cliques_.values():
      for c in cliques:
        yield c

  def GenerateXtbParserCallback(self, lang, debug=False):
    '''Creates a callback function as required by grit.xtb_reader.Parse().
    This callback will create Translation objects for each message from
    the XTB that exists in this uberclique, and add them as translations for
    the relevant cliques.  The callback will add translations to the language
    specified by 'lang'

    Args:
      lang: 'fr'
      debug: True | False
    '''
    def Callback(id, structure):
      # 'structure' is a list of (is_placeholder, text) pairs from the XTB.
      if id not in self.cliques_:
        if debug:
          print("Ignoring translation #%s" % id)
        return
      if debug:
        print("Adding translation #%s" % id)

      # We fetch placeholder information from the original message (the XTB file
      # only contains placeholder names).
      original_msg = self.BestClique(id).GetMessage()

      translation = tclib.Translation(id=id)
      for is_ph,text in structure:
        if not is_ph:
          translation.AppendText(text)
        else:
          # Match the placeholder name from the XTB against the original
          # message's placeholders to recover original text and example.
          found_placeholder = False
          for ph in original_msg.GetPlaceholders():
            if ph.GetPresentation() == text:
              translation.AppendPlaceholder(tclib.Placeholder(
                  ph.GetPresentation(), ph.GetOriginal(), ph.GetExample()))
              found_placeholder = True
              break
          if not found_placeholder:
            raise exception.MismatchingPlaceholders(
                'Translation for message ID %s had <ph name="%s"/>, no match\n'
                'in original message' % (id, text))
      self.FindCliqueAndAddTranslation(translation, lang)
    return Callback
class CustomType(object):
  '''A base class you should implement if you wish to specify a custom type
  for a message clique (i.e. custom validation and optional modification of
  translations).'''

  def Validate(self, message):
    '''Returns true if the message (a tclib.Message object) is valid,
    otherwise false.
    '''
    raise NotImplementedError()

  def ValidateAndModify(self, lang, translation):
    '''Returns true if the translation (a tclib.Translation object) is valid,
    otherwise false.  The language is also passed in.  This method may modify
    the translation that is passed in, if it so wishes.
    '''
    raise NotImplementedError()

  def ModifyTextPart(self, lang, text):
    '''If you call ModifyEachTextPart, it will turn around and call this method
    for each text part of the translation.  You should return the modified
    version of the text, or just the original text to not change anything.
    '''
    raise NotImplementedError()

  def ModifyEachTextPart(self, lang, translation):
    '''Call this to easily modify one or more of the textual parts of a
    translation.  It will call ModifyTextPart for each part of the
    translation.
    '''
    parts = translation.GetContent()
    # GetContent() returns the live list, so assigning back into it by index
    # mutates the translation in place.  Placeholders (non-strings) are
    # left untouched.
    for idx, part in enumerate(parts):
      if isinstance(part, six.string_types):
        parts[idx] = self.ModifyTextPart(lang, part)
class OneOffCustomType(CustomType):
  '''A very simple custom type that performs the validation expressed by
  the input expression on all languages including the source language.
  The expression can access the variables 'lang', 'msg' and 'text()' where
  'lang' is the language of 'msg', 'msg' is the message or translation being
  validated and 'text()' returns the real contents of 'msg' (for shorthand).
  '''

  def __init__(self, expression):
    self.expr = expression

  def Validate(self, message):
    # The source message is validated under the source language code.
    return self.ValidateAndModify(MessageClique.source_language, message)

  def ValidateAndModify(self, lang, msg):
    def text():
      return msg.GetRealContent()
    # NOTE: the expression comes from the .grd file and is evaluated with
    # eval(); it must only ever be build-controlled input.
    scope = {
        'lang': lang,
        'text': text,
        'msg': msg,
    }
    return eval(self.expr, {}, scope)
class MessageClique(object):
  '''A message along with all of its translations.  Also code to bring
  translations together with their original message.'''

  # change this to the language code of Messages you add to cliques_.
  # TODO(joi) Actually change this based on the <grit> node's source language
  source_language = 'en'

  # A constant translation we use when asked for a translation into the
  # special language constants.CONSTANT_LANGUAGE.
  CONSTANT_TRANSLATION = tclib.Translation(text='TTTTTT')

  # A pattern to match messages that are empty or whitespace only.
  WHITESPACE_MESSAGE = lazy_re.compile(r'^\s*$')

  def __init__(self, uber_clique, message, translateable=True,
               custom_type=None):
    '''Create a new clique initialized with just a message.

    Note that messages with a body comprised only of whitespace will implicitly
    be marked non-translatable.

    Args:
      uber_clique: Our uber-clique (collection of cliques)
      message: tclib.Message()
      translateable: True | False
      custom_type: instance of clique.CustomType interface
    '''
    # Our parent
    self.uber_clique = uber_clique
    # If not translateable, we only store the original message.
    self.translateable = translateable

    # We implicitly mark messages that have a whitespace-only body as
    # non-translateable.
    if MessageClique.WHITESPACE_MESSAGE.match(message.GetRealContent()):
      self.translateable = False

    # A mapping of language identifiers to tclib.BaseMessage and its
    # subclasses (i.e. tclib.Message and tclib.Translation).
    self.clique = { MessageClique.source_language : message }
    # A list of the "shortcut groups" this clique is
    # part of.  Within any given shortcut group, no shortcut key (e.g. &J)
    # must appear more than once in each language for all cliques that
    # belong to the group.
    self.shortcut_groups = []
    # An instance of the CustomType interface, or None.  If this is set, it will
    # be used to validate the original message and translations thereof, and
    # will also get a chance to modify translations of the message.
    self.SetCustomType(custom_type)

  def GetMessage(self):
    '''Retrieves the tclib.Message that is the source for this clique.'''
    return self.clique[MessageClique.source_language]

  def GetId(self):
    '''Retrieves the message ID of the messages in this clique.'''
    return self.GetMessage().GetId()

  def IsTranslateable(self):
    '''Returns whether this clique should receive translations.'''
    return self.translateable

  def AddToShortcutGroup(self, group):
    '''Registers this clique as a member of the named shortcut group.'''
    self.shortcut_groups.append(group)

  def SetCustomType(self, custom_type):
    '''Makes this clique use custom_type for validating messages and
    translations, and optionally modifying translations.

    Raises:
      exception.InvalidMessage: if the source message fails validation.
    '''
    self.custom_type = custom_type
    if custom_type and not custom_type.Validate(self.GetMessage()):
      raise exception.InvalidMessage(self.GetMessage().GetRealContent())

  def MessageForLanguage(self, lang, pseudo_if_no_match=True,
                         fallback_to_english=False):
    '''Returns the message/translation for the specified language, providing
    a pseudotranslation if there is no available translation and a pseudo-
    translation is requested.

    The translation of any message whatsoever in the special language
    'x_constant' is the message "TTTTTT".

    Args:
      lang: 'en'
      pseudo_if_no_match: True
      fallback_to_english: False

    Return:
      tclib.BaseMessage
    '''
    if not self.translateable:
      return self.GetMessage()

    if lang == constants.CONSTANT_LANGUAGE:
      return self.CONSTANT_TRANSLATION

    for msglang in self.clique:
      if lang == msglang:
        return self.clique[msglang]

    if lang == constants.FAKE_BIDI:
      return pseudo_rtl.PseudoRTLMessage(self.GetMessage())

    if fallback_to_english:
      # Recorded as a (non-error) fallback; reported later via
      # MissingTranslationsReport.
      self.uber_clique._AddMissingTranslation(lang, self, is_error=False)
      return self.GetMessage()

    # If we're not supposed to generate pseudotranslations, we add an error
    # report to a list of errors, then fail at a higher level, so that we
    # get a list of all messages that are missing translations.
    if not pseudo_if_no_match:
      self.uber_clique._AddMissingTranslation(lang, self, is_error=True)

    # Note: a pseudo message is returned even when pseudo_if_no_match is
    # False, so processing can continue after the error is recorded.
    return pseudo.PseudoMessage(self.GetMessage())

  def AllMessagesThatMatch(self, lang_re, include_pseudo = True):
    '''Returns a map of all messages that match 'lang', including the pseudo
    translation if requested.

    NOTE(review): for a non-translateable clique this returns a *list*
    containing the source message, not a map -- callers must handle both.

    Args:
      lang_re: re.compile(r'fr|en')
      include_pseudo: True

    Return:
      { 'en' : tclib.Message,
        'fr' : tclib.Translation,
        pseudo.PSEUDO_LANG : tclib.Translation }
    '''
    if not self.translateable:
      return [self.GetMessage()]

    matches = {}
    for msglang in self.clique:
      if lang_re.match(msglang):
        matches[msglang] = self.clique[msglang]

    if include_pseudo:
      matches[pseudo.PSEUDO_LANG] = pseudo.PseudoMessage(self.GetMessage())

    return matches

  def AddTranslation(self, translation, language):
    '''Add a translation to this clique.  The translation must have the same
    ID as the message that is the source for this clique.

    If this clique is not translateable, the function just returns.

    Args:
      translation: tclib.Translation()
      language: 'en'

    Throws:
      grit.exception.InvalidTranslation if the translation you're trying to add
      doesn't have the same message ID as the source message of this clique.
    '''
    if not self.translateable:
      return
    if translation.GetId() != self.GetId():
      raise exception.InvalidTranslation(
          'Msg ID %s, transl ID %s' % (self.GetId(), translation.GetId()))

    # Each language may only be added once.
    assert not language in self.clique

    # Because two messages can differ in the original content of their
    # placeholders yet share the same ID (because they are otherwise the
    # same), the translation we are getting may have different original
    # content for placeholders than our message, yet it is still the right
    # translation for our message (because it is for the same ID).  We must
    # therefore fetch the original content of placeholders from our original
    # English message.
    #
    # See grit.clique_unittest.MessageCliqueUnittest.testSemiIdenticalCliques
    # for a concrete explanation of why this is necessary.
    original = self.MessageForLanguage(self.source_language, False)
    if len(original.GetPlaceholders()) != len(translation.GetPlaceholders()):
      print("ERROR: '%s' translation of message id %s does not match" %
            (language, translation.GetId()))
      assert False

    # Rebuild the translation with the original message's placeholders so
    # placeholder originals/examples stay consistent across languages.
    transl_msg = tclib.Translation(id=self.GetId(),
                                   text=translation.GetPresentableContent(),
                                   placeholders=original.GetPlaceholders())

    if (self.custom_type and
        not self.custom_type.ValidateAndModify(language, transl_msg)):
      print("WARNING: %s translation failed validation: %s" %
            (language, transl_msg.GetId()))

    self.clique[language] = transl_msg

265
third_party/libwebrtc/tools/grit/grit/clique_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,265 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.clique'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import re
import unittest
from six import StringIO
from grit import clique
from grit import exception
from grit import pseudo
from grit import tclib
from grit import grd_reader
from grit import util
class MessageCliqueUnittest(unittest.TestCase):
  '''Tests for grit.clique: clique creation, translation lookup, best-clique
  selection, missing-translation reporting and custom types.

  The deprecated TestCase aliases failUnless/failIf (removed in Python 3.12)
  have been replaced with assertTrue/assertFalse; test logic is unchanged.
  '''

  def testClique(self):
    factory = clique.UberClique()
    msg = tclib.Message(text='Hello USERNAME, how are you?',
                        placeholders=[
                            tclib.Placeholder('USERNAME', '%s', 'Joi')])
    c = factory.MakeClique(msg)
    self.assertTrue(c.GetMessage() == msg)
    self.assertTrue(c.GetId() == msg.GetId())

    msg_fr = tclib.Translation(text='Bonjour USERNAME, comment ca va?',
                               id=msg.GetId(), placeholders=[
                                   tclib.Placeholder('USERNAME', '%s', 'Joi')])
    msg_de = tclib.Translation(text='Guten tag USERNAME, wie geht es dir?',
                               id=msg.GetId(), placeholders=[
                                   tclib.Placeholder('USERNAME', '%s', 'Joi')])

    c.AddTranslation(msg_fr, 'fr')
    factory.FindCliqueAndAddTranslation(msg_de, 'de')

    # sort() sorts lists in-place and does not return them
    for lang in ('en', 'fr', 'de'):
      self.assertTrue(lang in c.clique)

    self.assertTrue(c.MessageForLanguage('fr').GetRealContent() ==
                    msg_fr.GetRealContent())

    try:
      c.MessageForLanguage('zh-CN', False)
      self.fail('Should have gotten exception')
    except:
      # NOTE(review): this bare except also swallows the AssertionError from
      # self.fail above, so this check can never fail.  Kept as-is to avoid
      # changing test outcomes; see MessageForLanguage, which records the
      # error and returns a pseudo message rather than raising.
      pass

    self.assertTrue(c.MessageForLanguage('zh-CN', True) != None)

    rex = re.compile('fr|de|bingo')
    self.assertTrue(len(c.AllMessagesThatMatch(rex, False)) == 2)
    self.assertTrue(
        c.AllMessagesThatMatch(rex, True)[pseudo.PSEUDO_LANG] is not None)

  def testBestClique(self):
    factory = clique.UberClique()
    factory.MakeClique(tclib.Message(text='Alfur', description='alfaholl'))
    factory.MakeClique(tclib.Message(text='Alfur', description=''))
    factory.MakeClique(tclib.Message(text='Vaettur', description=''))
    factory.MakeClique(tclib.Message(text='Vaettur', description=''))
    factory.MakeClique(tclib.Message(text='Troll', description=''))
    factory.MakeClique(tclib.Message(text='Gryla', description='ID: IDS_GRYLA'))
    factory.MakeClique(tclib.Message(text='Gryla', description='vondakerling'))
    factory.MakeClique(tclib.Message(text='Leppaludi', description='ID: IDS_LL'))
    factory.MakeClique(tclib.Message(text='Leppaludi', description=''))

    count_best_cliques = 0
    for c in factory.BestCliquePerId():
      count_best_cliques += 1
      msg = c.GetMessage()
      text = msg.GetRealContent()
      description = msg.GetDescription()
      if text == 'Alfur':
        self.assertTrue(description == 'alfaholl')
      elif text == 'Gryla':
        self.assertTrue(description == 'vondakerling')
      elif text == 'Leppaludi':
        self.assertTrue(description == 'ID: IDS_LL')
    self.assertTrue(count_best_cliques == 5)

  def testAllInUberClique(self):
    resources = grd_reader.Parse(
        StringIO(u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="grit/testdata/klonk.rc" />
<structure type="tr_html" name="ID_HTML" file="grit/testdata/simple.html" />
</structures>
</release>
</grit>'''), util.PathFromRoot('.'))
    resources.SetOutputLanguage('en')
    resources.RunGatherers()
    content_list = []
    # Renamed from 'clique' to avoid shadowing the imported grit.clique
    # module inside this method.
    for clique_list in resources.UberClique().cliques_.values():
      for clq in clique_list:
        content_list.append(clq.GetMessage().GetRealContent())
    self.assertTrue('Hello %s, how are you doing today?' in content_list)
    self.assertTrue('Jack "Black" Daniels' in content_list)
    self.assertTrue('Hello!' in content_list)

  def testCorrectExceptionIfWrongEncodingOnResourceFile(self):
    '''This doesn't really belong in this unittest file, but what the heck.'''
    resources = grd_reader.Parse(
        StringIO(u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" file="grit/testdata/klonk.rc" />
</structures>
</release>
</grit>'''), util.PathFromRoot('.'))
    self.assertRaises(exception.SectionNotFound, resources.RunGatherers)

  def testSemiIdenticalCliques(self):
    messages = [
        tclib.Message(text='Hello USERNAME',
                      placeholders=[tclib.Placeholder('USERNAME', '$1', 'Joi')]),
        tclib.Message(text='Hello USERNAME',
                      placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')]),
    ]
    self.assertTrue(messages[0].GetId() == messages[1].GetId())

    # Both of the above would share a translation.
    translation = tclib.Translation(id=messages[0].GetId(),
                                    text='Bonjour USERNAME',
                                    placeholders=[tclib.Placeholder(
                                        'USERNAME', '$1', 'Joi')])

    factory = clique.UberClique()
    cliques = [factory.MakeClique(msg) for msg in messages]

    for clq in cliques:
      clq.AddTranslation(translation, 'fr')

    self.assertTrue(cliques[0].MessageForLanguage('fr').GetRealContent() ==
                    'Bonjour $1')
    self.assertTrue(cliques[1].MessageForLanguage('fr').GetRealContent() ==
                    'Bonjour %s')

  def testMissingTranslations(self):
    messages = [ tclib.Message(text='Hello'), tclib.Message(text='Goodbye') ]
    factory = clique.UberClique()
    cliques = [factory.MakeClique(msg) for msg in messages]

    # English fallback is a warning, not a missing translation.
    cliques[1].MessageForLanguage('fr', False, True)
    self.assertTrue(not factory.HasMissingTranslations())

    cliques[0].MessageForLanguage('de', False, False)
    self.assertTrue(factory.HasMissingTranslations())

    report = factory.MissingTranslationsReport()
    self.assertTrue(report.count('WARNING') == 1)
    self.assertTrue(report.count('8053599568341804890 "Goodbye" fr') == 1)
    self.assertTrue(report.count('ERROR') == 1)
    self.assertTrue(report.count('800120468867715734 "Hello" de') == 1)

  def testCustomTypes(self):
    factory = clique.UberClique()
    message = tclib.Message(text='Bingo bongo')
    c = factory.MakeClique(message)
    try:
      c.SetCustomType(DummyCustomType())
      self.fail()
    except:
      pass  # expected case - 'Bingo bongo' does not start with 'jjj'

    message = tclib.Message(text='jjjBingo bongo')
    c = factory.MakeClique(message)
    c.SetCustomType(util.NewClassInstance(
        'grit.clique_unittest.DummyCustomType', clique.CustomType))
    translation = tclib.Translation(id=message.GetId(), text='Bilingo bolongo')
    c.AddTranslation(translation, 'fr')
    self.assertTrue(
        c.MessageForLanguage('fr').GetRealContent().startswith('jjj'))

  def testWhitespaceMessagesAreNontranslateable(self):
    factory = clique.UberClique()

    message = tclib.Message(text=' \t')
    c = factory.MakeClique(message, translateable=True)
    self.assertFalse(c.IsTranslateable())

    message = tclib.Message(text='\n \n ')
    c = factory.MakeClique(message, translateable=True)
    self.assertFalse(c.IsTranslateable())

    message = tclib.Message(text='\n hello')
    c = factory.MakeClique(message, translateable=True)
    self.assertTrue(c.IsTranslateable())

  def testEachCliqueKeptSorted(self):
    factory = clique.UberClique()
    msg_a = tclib.Message(text='hello', description='a')
    msg_b = tclib.Message(text='hello', description='b')
    msg_c = tclib.Message(text='hello', description='c')
    # Insert out of order
    clique_b = factory.MakeClique(msg_b, translateable=True)
    clique_a = factory.MakeClique(msg_a, translateable=True)
    clique_c = factory.MakeClique(msg_c, translateable=True)
    clique_list = factory.cliques_[clique_a.GetId()]
    self.assertTrue(len(clique_list) == 3)
    self.assertTrue(clique_list[0] == clique_a)
    self.assertTrue(clique_list[1] == clique_b)
    self.assertTrue(clique_list[2] == clique_c)

  def testBestCliqueSortIsStable(self):
    factory = clique.UberClique()
    text = 'hello'
    msg_no_description = tclib.Message(text=text)
    msg_id_description_a = tclib.Message(text=text, description='ID: a')
    msg_id_description_b = tclib.Message(text=text, description='ID: b')
    msg_description_x = tclib.Message(text=text, description='x')
    msg_description_y = tclib.Message(text=text, description='y')
    clique_id = msg_no_description.GetId()

    # Insert in an order that tests all outcomes.
    clique_no_description = factory.MakeClique(msg_no_description,
                                               translateable=True)
    self.assertTrue(factory.BestClique(clique_id) == clique_no_description)
    clique_id_description_b = factory.MakeClique(msg_id_description_b,
                                                 translateable=True)
    self.assertTrue(factory.BestClique(clique_id) == clique_id_description_b)
    clique_id_description_a = factory.MakeClique(msg_id_description_a,
                                                 translateable=True)
    self.assertTrue(factory.BestClique(clique_id) == clique_id_description_a)
    clique_description_y = factory.MakeClique(msg_description_y,
                                              translateable=True)
    self.assertTrue(factory.BestClique(clique_id) == clique_description_y)
    clique_description_x = factory.MakeClique(msg_description_x,
                                              translateable=True)
    self.assertTrue(factory.BestClique(clique_id) == clique_description_x)
class DummyCustomType(clique.CustomType):
  '''Custom type used by testCustomTypes: accepts only messages starting with
  "jjj" and prefixes "jjj" onto every text part of a translation.'''

  def Validate(self, message):
    return message.GetRealContent().startswith('jjj')

  def ValidateAndModify(self, lang, translation):
    # Bug fix: the validation result was computed but never returned, so this
    # method always returned None (falsy) and every translation was reported
    # as failing validation regardless of its content.
    is_ok = self.Validate(translation)
    self.ModifyEachTextPart(lang, translation)
    return is_ok

  def ModifyTextPart(self, lang, text):
    return 'jjj%s' % text
if __name__ == '__main__':
unittest.main()

23
third_party/libwebrtc/tools/grit/grit/constants.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,23 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Constant definitions for GRIT.
'''

from __future__ import print_function

# This is the Icelandic noun meaning "grit" and is used to check that our
# input files are in the correct encoding.  The middle character gets encoded
# as two bytes in UTF-8, so this is sufficient to detect incorrect encoding.
ENCODING_CHECK = u'm\u00f6l'

# A special language, translations into which are always "TTTTTT".
CONSTANT_LANGUAGE = 'x_constant'

# Pseudo-locale used to exercise right-to-left layout.
FAKE_BIDI = 'fake-bidi'

# Magic number added to the header of resources brotli compressed by grit. Used
# to easily identify resources as being brotli compressed. See
# ui/base/resource/resource_bundle.h for decompression usage.
BROTLI_CONST = b'\x1e\x9b'

139
third_party/libwebrtc/tools/grit/grit/exception.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,139 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Exception types for GRIT.
'''
from __future__ import print_function
class Base(Exception):
  '''A base exception that uses the class's docstring in addition to any
  user-provided message as the body of the Base.
  '''
  def __init__(self, msg=''):
    # The class docstring doubles as the user-visible error text: prepend
    # it to a supplied message, or use it alone when no message is given.
    # (Idiom fix: `if msg:` instead of `if len(msg):` — equivalent for
    # strings and the Pythonic truthiness test.)
    if msg:
      if self.__doc__:
        msg = self.__doc__ + ': ' + msg
    else:
      msg = self.__doc__
    super(Base, self).__init__(msg)


# The docstrings below are runtime data (they become the exception text via
# Base.__init__), so they must not be reworded.  The redundant `pass`
# statements have been dropped: a docstring is a sufficient class body.

class Parsing(Base):
  '''An error occurred parsing a GRD or XTB file.'''


class UnknownElement(Parsing):
  '''An unknown node type was encountered.'''


class MissingElement(Parsing):
  '''An expected element was missing.'''


class UnexpectedChild(Parsing):
  '''An unexpected child element was encountered (on a leaf node).'''


class UnexpectedAttribute(Parsing):
  '''The attribute was not expected'''


class UnexpectedContent(Parsing):
  '''This element should not have content'''


class MissingMandatoryAttribute(Parsing):
  '''This element is missing a mandatory attribute'''


class MutuallyExclusiveMandatoryAttribute(Parsing):
  '''This element has 2 mutually exclusive mandatory attributes'''


class DuplicateKey(Parsing):
  '''A duplicate key attribute was found.'''


class TooManyExamples(Parsing):
  '''Only one <ex> element is allowed for each <ph> element.'''


class FileNotFound(Parsing):
  '''The resource file was not found.'''


class InvalidMessage(Base):
  '''The specified message failed validation.'''


class InvalidTranslation(Base):
  '''Attempt to add an invalid translation to a clique.'''


class NoSuchTranslation(Base):
  '''Requested translation not available'''


class NotReady(Base):
  '''Attempt to use an object before it is ready, or attempt to translate \
an empty document.'''


class MismatchingPlaceholders(Base):
  '''Placeholders do not match.'''


class InvalidPlaceholderName(Base):
  '''Placeholder name can only contain A-Z, a-z, 0-9 and underscore.'''


class BlockTagInTranslateableChunk(Base):
  '''A block tag was encountered where it wasn't expected.'''


class SectionNotFound(Base):
  '''The section you requested was not found in the RC file. Make \
sure the section ID is correct (matches the section's ID in the RC file). \
Also note that you may need to specify the RC file's encoding (using the \
encoding="" attribute) if it is not in the default Windows-1252 encoding. \
'''


class IdRangeOverlap(Base):
  '''ID range overlap.'''


class ReservedHeaderCollision(Base):
  '''Resource included with first 3 bytes matching reserved header.'''


class PlaceholderNotInsidePhNode(Base):
  '''Placeholder formatters should be inside <ph> element.'''


class InvalidCharactersInsidePhNode(Base):
  '''Invalid characters found inside <ph> element.'''

22
third_party/libwebrtc/tools/grit/grit/extern/BogoFP.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,22 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bogus fingerprint implementation, do not use for production,
provided only as an example.
Usage:
grit.py -h grit.extern.BogoFP xmb /tmp/foo
"""
from __future__ import print_function
import grit.extern.FP
def UnsignedFingerPrint(str, encoding='utf-8'):
  """Generate a fingerprint not intended for production from str (it
  reduces the precision of the production fingerprint by one bit).
  """
  # Compute the full production fingerprint first, then force one bit to
  # zero so the result is recognizably different from the real one.
  full_fp = grit.extern.FP._UnsignedFingerPrintImpl(str, encoding)
  return full_fp & 0xFFFFF7FFFFFFFFFF

72
third_party/libwebrtc/tools/grit/grit/extern/FP.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,72 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
# Pick an MD5 constructor: hashlib on any modern interpreter, with the
# long-obsolete md5 module as a fallback for ancient Pythons.
try:
  import hashlib
  _new_md5 = hashlib.md5
except ImportError:
  import md5
  _new_md5 = md5.new


"""64-bit fingerprint support for strings.

Usage:
  from extern import FP
  print('Fingerprint is %ld' % FP.FingerPrint('Hello world!'))
"""


def _UnsignedFingerPrintImpl(str, encoding='utf-8'):
  """Generate a 64-bit fingerprint by taking the first half of the md5
  of the string.
  """
  # An md5 hexdigest has 32 hex digits; the first 16 encode the top
  # 64 bits of the digest.
  digest = _new_md5(str.encode(encoding)).hexdigest()
  return int(digest[:16], 16)


def UnsignedFingerPrint(str, encoding='utf-8'):
  """Generate a 64-bit fingerprint.

  The default implementation uses _UnsignedFingerPrintImpl, which
  takes the first half of the md5 of the string, but the
  implementation may be switched using SetUnsignedFingerPrint.
  """
  return _UnsignedFingerPrintImpl(str, encoding)


def FingerPrint(str, encoding='utf-8'):
  """Return the fingerprint reinterpreted as a signed 64-bit long."""
  value = UnsignedFingerPrint(str, encoding=encoding)
  # Two's-complement reinterpretation: values with the top bit set map to
  # negative signed longs.
  if value & 0x8000000000000000:
    return -((~value & 0xFFFFFFFFFFFFFFFF) + 1)
  return value
def UseUnsignedFingerPrintFromModule(module_name):
  """Imports module_name and replaces UnsignedFingerPrint in the
  current module with the function of the same name from the imported
  module.

  Returns the function object previously known as
  grit.extern.FP.UnsignedFingerPrint.
  """
  # The fromlist argument forces __import__ to return the leaf module
  # itself rather than the top-level package.
  hash_module = __import__(module_name, fromlist=[module_name])
  return SetUnsignedFingerPrint(hash_module.UnsignedFingerPrint)
def SetUnsignedFingerPrint(function_object):
  """Sets grit.extern.FP.UnsignedFingerPrint to point to
  function_object.

  Returns the function object previously known as
  grit.extern.FP.UnsignedFingerPrint.
  """
  global UnsignedFingerPrint
  # Swap the module-level binding and hand back the old callable so a
  # caller can restore it later.
  original_function_object = UnsignedFingerPrint
  UnsignedFingerPrint = function_object
  return original_function_object

0
third_party/libwebrtc/tools/grit/grit/extern/__init__.py поставляемый Normal file
Просмотреть файл

503
third_party/libwebrtc/tools/grit/grit/extern/tclib.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,503 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The tclib module contains tools for aggregating, verifying, and storing
# messages destined for the Translation Console, as well as for reading
# translations back and outputting them in some desired format.
#
# This has been stripped down to include only the functionality needed by grit
# for creating Windows .rc and .h files. These are the only parts needed by
# the Chrome build process.
from __future__ import print_function
from grit.extern import FP
# This module assumes that within a bundle no two messages can have the
# same id unless they're identical.
# The basic classes defined here for external use are Message and Translation,
# where the former is used for English messages and the latter for
# translations. These classes have a lot of common functionality, as expressed
# by the common parent class BaseMessage. Perhaps the most important
# distinction is that translated text is stored in UTF-8, whereas original text
# is stored in whatever encoding the client uses (presumably Latin-1).
# --------------------
# The public interface
# --------------------
# Generate message id from message text and meaning string (optional),
# both in utf-8 encoding
#
def GenerateMessageId(message, meaning=''):
  """Return a decimal-string id derived from fingerprints of the text.

  The meaning, when present, is folded into the fingerprint so that two
  messages with identical text but different meanings get distinct ids.
  """
  fp = FP.FingerPrint(message)
  if meaning:
    # Combine the fingerprints of message and meaning; the extra +1 for
    # negative fp preserves the sign bit's contribution.
    fp2 = FP.FingerPrint(meaning)
    fp = fp2 + (fp << 1) + (1 if fp < 0 else 0)
  # To avoid negative ids we strip the high-order bit.
  return str(fp & 0x7fffffffffffffff)
# -------------------------------------------------------------------------
# The MessageTranslationError class is used to signal tclib-specific errors.


class MessageTranslationError(Exception):
  # NOTE(review): assigning self.args directly (often a plain string)
  # bypasses Exception.__init__, which normally stores a tuple in args;
  # kept as-is since callers may rely on this legacy behavior.
  def __init__(self, args = ''):
    self.args = args
# -----------------------------------------------------------
# The Placeholder class represents a placeholder in a message.


class Placeholder(object):
  # NOTE(review): the constructor that initializes __presentation,
  # __original and __example is not visible in this excerpt; the
  # attributes below are presumably set there — confirm in the full file.

  # String representation
  def __str__(self):
    return '%s, "%s", "%s"' % \
        (self.__presentation, self.__original, self.__example)

  # Getters
  def GetOriginal(self):
    return self.__original

  def GetPresentation(self):
    return self.__presentation

  def GetExample(self):
    return self.__example

  def __eq__(self, other):
    # Strict comparison: originals and examples must also match, and
    # trailing whitespace is significant.
    return self.EqualTo(other, strict=1, ignore_trailing_spaces=0)

  # Equality test
  #
  # ignore_trailing_spaces: TC is using varchar to store the
  # phrwr fields, as a result of that, the trailing spaces
  # are removed by MySQL when the strings are stored into TC:-(
  # ignore_trailing_spaces parameter is used to ignore
  # trailing spaces during equivalence comparison.
  #
  def EqualTo(self, other, strict = 1, ignore_trailing_spaces = 1):
    # Returns 1/0 rather than True/False (legacy style used throughout
    # this module).  StringEquals is a module helper not shown in this
    # excerpt.
    if type(other) is not Placeholder:
      return 0
    if StringEquals(self.__presentation, other.__presentation,
                    ignore_trailing_spaces):
      if not strict or (StringEquals(self.__original, other.__original,
                                     ignore_trailing_spaces) and
                        StringEquals(self.__example, other.__example,
                                     ignore_trailing_spaces)):
        return 1
    return 0
# -----------------------------------------------------------------
# BaseMessage is the common parent class of Message and Translation.
# It is not meant for direct use.


class BaseMessage(object):
  # Three types of message construction is supported. If the message text is a
  # simple string with no dynamic content, you can pass it to the constructor
  # as the "text" parameter. Otherwise, you can omit "text" and assemble the
  # message step by step using AppendText() and AppendPlaceholder(). Or, as an
  # alternative, you can give the constructor the "presentable" version of the
  # message and a list of placeholders; it will then parse the presentation and
  # build the message accordingly. For example:
  # Message(text = "There are NUM_BUGS bugs in your code",
  # placeholders = [Placeholder("NUM_BUGS", "%d", "33")],
  # description = "Bla bla bla")

  # NOTE(review): the constructor itself is not visible in this excerpt;
  # the private attributes used below (__content, __id, __name, ...) are
  # presumably initialized there.

  def __eq__(self, other):
    # "source encoding" is nonsense, so ignore it
    # (_ObjectEquals is a module helper not shown in this excerpt.)
    return _ObjectEquals(self, other, ['_BaseMessage__source_encoding'])

  def GetName(self):
    return self.__name

  def GetSourceEncoding(self):
    return self.__source_encoding
  # Append a placeholder to the message
  def AppendPlaceholder(self, placeholder):
    if not isinstance(placeholder, Placeholder):
      raise MessageTranslationError("Invalid message placeholder %s in "
                                    "message %s" % (placeholder, self.GetId()))
    # Are there other placeholders with the same presentation?
    # If so, they need to be the same.
    for other in self.GetPlaceholders():
      if placeholder.GetPresentation() == other.GetPresentation():
        if not placeholder.EqualTo(other):
          raise MessageTranslationError(
            "Conflicting declarations of %s within message" %
            placeholder.GetPresentation())
    # update placeholder list
    # (only the first occurrence of an equal placeholder is recorded in
    # __placeholders; __content always gets the new item appended)
    dup = 0
    for item in self.__content:
      if isinstance(item, Placeholder) and placeholder.EqualTo(item):
        dup = 1
        break
    if not dup:
      self.__placeholders.append(placeholder)
    # update content
    self.__content.append(placeholder)

  # Strips leading and trailing whitespace, and returns a tuple
  # containing the leading and trailing space that was removed.
  def Strip(self):
    leading = trailing = ''
    if len(self.__content) > 0:
      s0 = self.__content[0]
      if not isinstance(s0, Placeholder):
        s = s0.lstrip()
        # NOTE(review): when the first chunk is entirely whitespace,
        # len(s) == 0 and s0[:-0] evaluates to '', so the removed
        # whitespace is NOT captured in `leading` — confirm intended.
        leading = s0[:-len(s)]
        self.__content[0] = s
      s0 = self.__content[-1]
      if not isinstance(s0, Placeholder):
        s = s0.rstrip()
        trailing = s0[len(s):]
        self.__content[-1] = s
    return leading, trailing

  # Return the id of this message
  def GetId(self):
    # Fall back to a generated (fingerprint-based) id when no explicit id
    # has been set; GenerateId is provided by the subclasses.
    if self.__id is None:
      return self.GenerateId()
    return self.__id
# Set the id of this message
def SetId(self, id):
if id is None:
self.__id = None
else:
self.__id = str(id) # Treat numerical ids as strings
  # Return content of this message as a list (internal use only)
  def GetContent(self):
    # Callers receive the live list, not a copy — mutations are visible.
    return self.__content
# Return a human-readable version of this message
def GetPresentableContent(self):
presentable_content = ""
for item in self.__content:
if isinstance(item, Placeholder):
presentable_content += item.GetPresentation()
else:
presentable_content += item
return presentable_content
  # Return a fragment of a message in escaped format
  def EscapeFragment(self, fragment):
    # Double the percent signs so the fragment survives printf-style
    # formatting downstream.
    return fragment.replace('%', '%%')

  # Return the "original" version of this message, doing %-escaping
  # properly. If source_msg is specified, the placeholder original
  # information inside source_msg will be used instead.
  def GetOriginalContent(self, source_msg = None):
    original_content = ""
    for item in self.__content:
      if isinstance(item, Placeholder):
        if source_msg:
          ph = source_msg.GetPlaceholder(item.GetPresentation())
          if not ph:
            raise MessageTranslationError(
              "Placeholder %s doesn't exist in message: %s" %
              (item.GetPresentation(), source_msg))
          original_content += ph.GetOriginal()
        else:
          # Placeholder originals are deliberately not %-escaped here.
          original_content += item.GetOriginal()
      else:
        original_content += self.EscapeFragment(item)
    return original_content
# Return the example of this message
def GetExampleContent(self):
example_content = ""
for item in self.__content:
if isinstance(item, Placeholder):
example_content += item.GetExample()
else:
example_content += item
return example_content
  # Return a list of all unique placeholders in this message
  def GetPlaceholders(self):
    return self.__placeholders

  # Return a placeholder in this message
  def GetPlaceholder(self, presentation):
    # Scans __content (not __placeholders) so the first occurrence in
    # reading order wins.
    for item in self.__content:
      if (isinstance(item, Placeholder) and
          item.GetPresentation() == presentation):
        return item
    return None

  # Return this message's description
  def GetDescription(self):
    return self.__description

  # Add a message source
  def AddSource(self, source):
    self.__sources.append(source)

  # Return this message's sources as a list
  def GetSources(self):
    return self.__sources

  # Return this message's sources as a string
  def GetSourcesAsText(self, delimiter = "; "):
    return delimiter.join(self.__sources)

  # Set the obsolete flag for a message (internal use only)
  def SetObsolete(self):
    self.__obsolete = 1

  # Get the obsolete flag for a message (internal use only)
  def IsObsolete(self):
    return self.__obsolete

  # Get the sequence number (0 by default)
  def GetSequenceNumber(self):
    return self.__sequence_number

  # Set the sequence number
  def SetSequenceNumber(self, number):
    self.__sequence_number = number

  # Increment instance counter
  def AddInstance(self):
    self.__num_instances += 1

  # Return instance count
  def GetNumInstances(self):
    return self.__num_instances

  def GetErrors(self, from_tc=0):
    """
    Returns a description of the problem if the message is not
    syntactically valid, or None if everything is fine.

    Args:
      from_tc: indicates whether this message came from the TC. We let
      the TC get away with some things we normally wouldn't allow for
      historical reasons.
    """
    # NOTE(review): from_tc is currently unused in this implementation.
    # check that placeholders are unambiguous
    # First map each placeholder's start offset in the presentable text to
    # the placeholder occupying it.
    pos = 0
    phs = {}
    for item in self.__content:
      if isinstance(item, Placeholder):
        phs[pos] = item
        pos += len(item.GetPresentation())
      else:
        pos += len(item)
    presentation = self.GetPresentableContent()
    # FindOverlapping and IsSubstringInPlaceholder are module helpers not
    # visible in this excerpt.
    for ph in self.GetPlaceholders():
      for pos in FindOverlapping(presentation, ph.GetPresentation()):
        # message contains the same text as a placeholder presentation
        other_ph = phs.get(pos)
        if ((not other_ph
             and not IsSubstringInPlaceholder(pos, len(ph.GetPresentation()), phs))
            or
            (other_ph and len(other_ph.GetPresentation()) < len(ph.GetPresentation()))):
          return "message contains placeholder name '%s':\n%s" % (
            ph.GetPresentation(), presentation)
    return None

  def __CopyTo(self, other):
    """
    Returns a copy of this BaseMessage.
    """
    # Either direction of the subclass relationship is accepted, e.g.
    # copying between Message and Translation instances.
    assert isinstance(other, self.__class__) or isinstance(self, other.__class__)
    other.__source_encoding = self.__source_encoding
    # Lists are shallow-copied: the containers are fresh, their items are
    # shared with this message.
    other.__content = self.__content[:]
    other.__description = self.__description
    other.__id = self.__id
    other.__num_instances = self.__num_instances
    other.__obsolete = self.__obsolete
    other.__name = self.__name
    other.__placeholders = self.__placeholders[:]
    other.__sequence_number = self.__sequence_number
    other.__sources = self.__sources[:]
    return other
def HasText(self):
"""Returns true iff this message has anything other than placeholders."""
for item in self.__content:
if not isinstance(item, Placeholder):
return True
return False
# --------------------------------------------------------
# The Message class represents original (English) messages


class Message(BaseMessage):
  # See BaseMessage constructor
  def __init__(self, source_encoding, text=None, id=None,
               description=None, meaning="", placeholders=None,
               source=None, sequence_number=0, clone_from=None,
               time_created=0, name=None, is_hidden = 0):
    if clone_from is not None:
      # Clone path: copy everything from the template message; the other
      # constructor arguments are ignored.
      BaseMessage.__init__(self, None, clone_from=clone_from)
      self.__meaning = clone_from.__meaning
      self.__time_created = clone_from.__time_created
      self.__is_hidden = clone_from.__is_hidden
      return
    BaseMessage.__init__(self, source_encoding, text, id, description,
                         placeholders, source, sequence_number,
                         name=name)
    self.__meaning = meaning
    self.__time_created = time_created
    # Routed through the setter so its 0/1 validation applies.
    self.SetIsHidden(is_hidden)
# String representation
def __str__(self):
s = 'source: %s, id: %s, content: "%s", meaning: "%s", ' \
'description: "%s"' % \
(self.GetSourcesAsText(), self.GetId(), self.GetPresentableContent(),
self.__meaning, self.GetDescription())
if self.GetName() is not None:
s += ', name: "%s"' % self.GetName()
placeholders = self.GetPlaceholders()
for i in range(len(placeholders)):
s += ", placeholder[%d]: %s" % (i, placeholders[i])
return s
  # Strips leading and trailing whitespace, and returns a tuple
  # containing the leading and trailing space that was removed.
  # NOTE(review): duplicates BaseMessage.Strip except that it goes through
  # GetContent() instead of touching __content directly.
  def Strip(self):
    leading = trailing = ''
    content = self.GetContent()
    if len(content) > 0:
      s0 = content[0]
      if not isinstance(s0, Placeholder):
        s = s0.lstrip()
        # NOTE(review): when the first chunk is entirely whitespace,
        # len(s) == 0 and s0[:-0] evaluates to '', so the removed
        # whitespace is NOT captured in `leading` — confirm intended.
        leading = s0[:-len(s)]
        content[0] = s
      s0 = content[-1]
      if not isinstance(s0, Placeholder):
        s = s0.rstrip()
        trailing = s0[len(s):]
        content[-1] = s
    return leading, trailing
  # Generate an id by hashing message content
  def GenerateId(self):
    # GenerateMessageId is the module-level fingerprint helper; the meaning
    # participates so equal texts with different meanings get distinct ids.
    self.SetId(GenerateMessageId(self.GetPresentableContent(),
                                 self.__meaning))
    return self.GetId()

  def GetMeaning(self):
    return self.__meaning

  def GetTimeCreated(self):
    return self.__time_created
# Equality operator
def EqualTo(self, other, strict = 1):
# Check id, meaning, content
if self.GetId() != other.GetId():
return 0
if self.__meaning != other.__meaning:
return 0
if self.GetPresentableContent() != other.GetPresentableContent():
return 0
# Check descriptions if comparison is strict
if (strict and
self.GetDescription() is not None and
other.GetDescription() is not None and
self.GetDescription() != other.GetDescription()):
return 0
# Check placeholders
ph1 = self.GetPlaceholders()
ph2 = other.GetPlaceholders()
if len(ph1) != len(ph2):
return 0
for i in range(len(ph1)):
if not ph1[i].EqualTo(ph2[i], strict):
return 0
return 1
  def Copy(self):
    """
    Returns a copy of this Message.
    """
    # Sanity check; the clone constructor path copies all state.
    assert isinstance(self, Message)
    return Message(None, clone_from=self)
def SetIsHidden(self, is_hidden):
"""Sets whether this message should be hidden.
Args:
is_hidden : 0 or 1 - if the message should be hidden, 0 otherwise
"""
if is_hidden not in [0, 1]:
raise MessageTranslationError("is_hidden must be 0 or 1, got %s")
self.__is_hidden = is_hidden
def IsHidden(self):
"""Returns 1 if this message is hidden, and 0 otherwise."""
return self.__is_hidden
# ----------------------------------------------------
# The Translation class represents translated messages


class Translation(BaseMessage):
  # See BaseMessage constructor
  def __init__(self, source_encoding, text=None, id=None,
               description=None, placeholders=None, source=None,
               sequence_number=0, clone_from=None, ignore_ph_errors=0,
               name=None):
    if clone_from is not None:
      # Clone path: copy everything from the template; remaining arguments
      # are ignored.
      BaseMessage.__init__(self, None, clone_from=clone_from)
      return
    # NOTE(review): ignore_ph_errors presumably relaxes placeholder
    # validation in the BaseMessage constructor (not visible in this
    # excerpt) — confirm there.
    BaseMessage.__init__(self, source_encoding, text, id, description,
                         placeholders, source, sequence_number,
                         ignore_ph_errors=ignore_ph_errors, name=name)
# String representation
def __str__(self):
s = 'source: %s, id: %s, content: "%s", description: "%s"' % \
(self.GetSourcesAsText(), self.GetId(), self.GetPresentableContent(),
self.GetDescription());
placeholders = self.GetPlaceholders()
for i in range(len(placeholders)):
s += ", placeholder[%d]: %s" % (i, placeholders[i])
return s
# Equality operator
def EqualTo(self, other, strict=1):
# Check id and content
if self.GetId() != other.GetId():
return 0
if self.GetPresentableContent() != other.GetPresentableContent():
return 0
# Check placeholders
ph1 = self.GetPlaceholders()
ph2 = other.GetPlaceholders()
if len(ph1) != len(ph2):
return 0
for i in range(len(ph1)):
if not ph1[i].EqualTo(ph2[i], strict):
return 0
return 1
  def Copy(self):
    """
    Returns a copy of this Translation.
    """
    # The clone constructor path copies all state from self.
    return Translation(None, clone_from=self)

8
third_party/libwebrtc/tools/grit/grit/format/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Module grit.format
'''
pass

212
third_party/libwebrtc/tools/grit/grit/format/android_xml.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,212 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Produces localized strings.xml files for Android.
In cases where an "android" type output file is requested in a grd, the classes
in android_xml will process the messages and translations to produce a valid
strings.xml that is properly localized with the specified language.
For example if the following output tag were to be included in a grd file
<outputs>
...
<output filename="values-es/strings.xml" type="android" lang="es" />
...
</outputs>
for a grd file with the following messages:
<message name="IDS_HELLO" desc="Simple greeting">Hello</message>
<message name="IDS_WORLD" desc="The world">world</message>
and there existed an appropriate xtb file containing the Spanish translations,
then the output would be:
<?xml version="1.0" encoding="utf-8"?>
<resources xmlns:android="http://schemas.android.com/apk/res/android">
<string name="hello">"Hola"</string>
<string name="world">"mundo"</string>
</resources>
which would be written to values-es/strings.xml and usable by the Android
resource framework.
Advanced usage
--------------
To process only certain messages in a grd file, tag each desired message by
adding "android_java" to formatter_data. Then set the environmental variable
ANDROID_JAVA_TAGGED_ONLY to "true" when building the grd file. For example:
<message name="IDS_HELLO" formatter_data="android_java">Hello</message>
To generate Android plurals (aka "quantity strings"), use the ICU plural syntax
in the grd file. This will automatically be transformed into a <purals> element
in the output xml file. For example:
<message name="IDS_CATS">
{NUM_CATS, plural,
=1 {1 cat}
other {# cats}}
</message>
will produce
<plurals name="cats">
<item quantity="one">1 Katze</item>
<item quantity="other">%d Katzen</item>
</plurals>
"""
from __future__ import print_function
import os
import re
import xml.sax.saxutils
from grit import lazy_re
from grit.node import message
# When this environmental variable has value "true", only tagged messages will
# be outputted.
_TAGGED_ONLY_ENV_VAR = 'ANDROID_JAVA_TAGGED_ONLY'
_TAGGED_ONLY_DEFAULT = False

# In tagged-only mode, only messages with this tag will be outputted.
_EMIT_TAG = 'android_java'

# Matches resource names like IDS_FOO_BAR, capturing FOO_BAR for use as the
# Android string name.
_NAME_PATTERN = lazy_re.compile(r'IDS_(?P<name>[A-Z0-9_]+)\Z')

# Most strings are output as a <string> element. Note the double quotes
# around the value to preserve whitespace.
_STRING_TEMPLATE = u'<string name="%s">"%s"</string>\n'

# Some strings are output as a <plurals> element.
_PLURALS_TEMPLATE = '<plurals name="%s">\n%s</plurals>\n'
_PLURALS_ITEM_TEMPLATE = ' <item quantity="%s">%s</item>\n'

# Matches e.g. "{HELLO, plural, HOW ARE YOU DOING}", while capturing
# "HOW ARE YOU DOING" in <items>.
_PLURALS_PATTERN = lazy_re.compile(r'\{[A-Z_]+,\s*plural,(?P<items>.*)\}$',
                                   flags=re.S)

# Repeatedly matched against the <items> capture in _PLURALS_PATTERN,
# to match "<quantity>{<value>}".
_PLURALS_ITEM_PATTERN = lazy_re.compile(r'(?P<quantity>\S+?)\s*'
                                        r'\{(?P<value>.*?)\}')

# Maps ICU plural selectors to Android quantity attributes; selectors with
# no entry here (e.g. "=7") are rejected by _FormatPluralMessage.
_PLURALS_QUANTITY_MAP = {
  '=0': 'zero',
  'zero': 'zero',
  '=1': 'one',
  'one': 'one',
  '=2': 'two',
  'two': 'two',
  'few': 'few',
  'many': 'many',
  'other': 'other',
}
def Format(root, lang='en', output_dir='.'):
  """Generates the contents of an Android strings.xml file, chunk by chunk.

  Args:
    root: the grd root node.
    lang: language code used to select translations.
    output_dir: unused here; kept for formatter interface compatibility.

  Yields:
    Fragments of the strings.xml document, in order.
  """
  yield ('<?xml version="1.0" encoding="utf-8"?>\n'
         '<resources '
         'xmlns:android="http://schemas.android.com/apk/res/android">\n')

  # The environment variable, when set, overrides the default tagged-only
  # behavior; anything other than "true"/"false" is an error.
  tagged_only = _TAGGED_ONLY_DEFAULT
  if _TAGGED_ONLY_ENV_VAR in os.environ:
    tagged_only = os.environ[_TAGGED_ONLY_ENV_VAR].lower()
    if tagged_only == 'true':
      tagged_only = True
    elif tagged_only == 'false':
      tagged_only = False
    else:
      raise Exception('env variable ANDROID_JAVA_TAGGED_ONLY must have value '
                      'true or false. Invalid value: %s' % tagged_only)

  for item in root.ActiveDescendants():
    with item:
      if ShouldOutputNode(item, tagged_only):
        yield _FormatMessage(item, lang)

  yield '</resources>\n'
def ShouldOutputNode(node, tagged_only):
  """Returns true if node should be outputted.

  Args:
      node: a Node from the grd dom
      tagged_only: true, if only tagged messages should be outputted
  """
  # Only message nodes are ever emitted; in tagged-only mode they must
  # additionally carry the emit tag in their formatter data.
  if not isinstance(node, message.MessageNode):
    return False
  return not tagged_only or _EMIT_TAG in node.formatter_data
def _FormatPluralMessage(message):
  """Compiles ICU plural syntax to the body of an Android <plurals> element.

  1. In a .grd file, we can write a plural string like this:

    <message name="IDS_THINGS">
      {NUM_THINGS, plural,
      =1 {1 thing}
      other {# things}}
    </message>

  2. The Android equivalent looks like this:

    <plurals name="things">
      <item quantity="one">1 thing</item>
      <item quantity="other">%d things</item>
    </plurals>

  This method takes the body of (1) and converts it to the body of (2).

  If the message is *not* a plural string, this function returns `None`.
  If the message includes quantities without an equivalent format in Android,
  it raises an exception.
  """
  # Cleanup: removed the unused local `ret = {}` that the original
  # declared and never read.
  plural_match = _PLURALS_PATTERN.match(message)
  if not plural_match:
    return None
  body_in = plural_match.group('items').strip()
  lines = []
  quantities_so_far = set()
  for item_match in _PLURALS_ITEM_PATTERN.finditer(body_in):
    quantity_in = item_match.group('quantity')
    quantity_out = _PLURALS_QUANTITY_MAP.get(quantity_in)
    value_in = item_match.group('value')
    # '#' is ICU's count placeholder; Android uses printf-style %d.
    value_out = '"' + value_in.replace('#', '%d') + '"'
    if quantity_out:
      # only one line per quantity out (https://crbug.com/787488)
      if quantity_out not in quantities_so_far:
        quantities_so_far.add(quantity_out)
        lines.append(_PLURALS_ITEM_TEMPLATE % (quantity_out, value_out))
    else:
      raise Exception('Unsupported plural quantity for android '
                      'strings.xml: %s' % quantity_in)
  return ''.join(lines)
def _FormatMessage(item, lang):
  """Writes out a single string as a <resource/> element."""
  mangled_name = item.GetTextualIds()[0]
  match = _NAME_PATTERN.match(mangled_name)
  if not match:
    raise Exception('Unexpected resource name: %s' % mangled_name)
  # IDS_FOO_BAR becomes the Android name "foo_bar".
  name = match.group('name').lower()
  # Preserve the message's original surrounding whitespace around the
  # translated text.
  value = item.ws_at_start + item.Translate(lang) + item.ws_at_end
  # Replace < > & with &lt; &gt; &amp; to ensure we generate valid XML and
  # replace ' " with \' \" to conform to Android's string formatting rules.
  value = xml.sax.saxutils.escape(value, {"'": "\\'", '"': '\\"'})
  # Plural-syntax messages become <plurals>; everything else is <string>.
  plurals = _FormatPluralMessage(value)
  if plurals:
    return _PLURALS_TEMPLATE % (name, plurals)
  else:
    return _STRING_TEMPLATE % (name, value)

Просмотреть файл

@ -0,0 +1,149 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for android_xml.py."""
from __future__ import print_function
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from six import StringIO
from grit import util
from grit.format import android_xml
from grit.node import message
from grit.tool import build
class AndroidXmlUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest(r"""
<messages>
<message name="IDS_SIMPLE" desc="A vanilla string">
Martha
</message>
<message name="IDS_ONE_LINE" desc="On one line">sat and wondered</message>
<message name="IDS_QUOTES" desc="A string with quotation marks">
out loud, "Why don't I build a flying car?"
</message>
<message name="IDS_MULTILINE" desc="A string split over several lines">
She gathered
wood, charcoal, and
a sledge hammer.
</message>
<message name="IDS_WHITESPACE" desc="A string with extra whitespace.">
''' How old fashioned -- she thought. '''
</message>
<message name="IDS_PLACEHOLDERS" desc="A string with placeholders">
I'll buy a <ph name="WAVELENGTH">%d<ex>200</ex></ph> nm laser at <ph name="STORE_NAME">%s<ex>the grocery store</ex></ph>.
</message>
<message name="IDS_PLURALS" desc="A string using the ICU plural format">
{NUM_THINGS, plural,
=1 {Maybe I'll get one laser.}
other {Maybe I'll get # lasers.}}
</message>
<message name="IDS_PLURALS_NO_SPACE" desc="A string using the ICU plural format with no space">
{NUM_MISSISSIPPIS, plural,
=1{OneMississippi}other{ManyMississippis}}
</message>
</messages>
""")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('android', 'en'), buf)
output = buf.getvalue()
expected = r"""
<?xml version="1.0" encoding="utf-8"?>
<resources xmlns:android="http://schemas.android.com/apk/res/android">
<string name="simple">"Martha"</string>
<string name="one_line">"sat and wondered"</string>
<string name="quotes">"out loud, \"Why don\'t I build a flying car?\""</string>
<string name="multiline">"She gathered
wood, charcoal, and
a sledge hammer."</string>
<string name="whitespace">" How old fashioned -- she thought. "</string>
<string name="placeholders">"I\'ll buy a %d nm laser at %s."</string>
<plurals name="plurals">
<item quantity="one">"Maybe I\'ll get one laser."</item>
<item quantity="other">"Maybe I\'ll get %d lasers."</item>
</plurals>
<plurals name="plurals_no_space">
<item quantity="one">"OneMississippi"</item>
<item quantity="other">"ManyMississippis"</item>
</plurals>
</resources>
"""
self.assertEqual(output.strip(), expected.strip())
def testConflictingPlurals(self):
root = util.ParseGrdForUnittest(r"""
<messages>
<message name="IDS_PLURALS" desc="A string using the ICU plural format">
{NUM_THINGS, plural,
=1 {Maybe I'll get one laser.}
one {Maybe I'll get one laser.}
other {Maybe I'll get # lasers.}}
</message>
</messages>
""")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('android', 'en'), buf)
output = buf.getvalue()
expected = r"""
<?xml version="1.0" encoding="utf-8"?>
<resources xmlns:android="http://schemas.android.com/apk/res/android">
<plurals name="plurals">
<item quantity="one">"Maybe I\'ll get one laser."</item>
<item quantity="other">"Maybe I\'ll get %d lasers."</item>
</plurals>
</resources>
"""
self.assertEqual(output.strip(), expected.strip())
  def testTaggedOnly(self):
    # In tagged-only mode, only messages carrying
    # formatter_data="android_java" may be emitted; with tagged_only off,
    # every message node qualifies.
    root = util.ParseGrdForUnittest(r"""
        <messages>
          <message name="IDS_HELLO" desc="" formatter_data="android_java">
            Hello
          </message>
          <message name="IDS_WORLD" desc="">
            world
          </message>
        </messages>
        """)
    msg_hello, msg_world = root.GetChildrenOfType(message.MessageNode)
    self.assertTrue(android_xml.ShouldOutputNode(msg_hello, tagged_only=True))
    self.assertFalse(android_xml.ShouldOutputNode(msg_world, tagged_only=True))
    self.assertTrue(android_xml.ShouldOutputNode(msg_hello, tagged_only=False))
    self.assertTrue(android_xml.ShouldOutputNode(msg_world, tagged_only=False))
class DummyOutput(object):
  """Minimal stand-in for a grd output node, as consumed by RcBuilder."""

  def __init__(self, type, language):
    self.type = type
    self.language = language

  def GetType(self):
    return self.type

  def GetLanguage(self):
    return self.language

  def GetOutputFilename(self):
    # The filename is irrelevant for these tests; any string will do.
    return 'hello.gif'


if __name__ == '__main__':
  unittest.main()

95
third_party/libwebrtc/tools/grit/grit/format/c_format.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,95 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Formats as a .C file for compilation.
"""
from __future__ import print_function
import codecs
import os
import re
import six
from grit import util
def _FormatHeader(root, output_dir):
  """Returns the required preamble for C files.

  Locates the grd's rc_header output (if any) so the generated .c file can
  #include it; falls back to a plain "resource.h" otherwise.
  """
  resource_header = 'resource.h'  # fall back to this
  for output in root.GetOutputFiles():
    if output.attrs['type'] != 'rc_header':
      continue
    # Rebase the header path so the #include resolves from output_dir.
    absolute = os.path.abspath(output.GetOutputFilename())
    resource_header = util.MakeRelativePath(output_dir, absolute)
  return ('// This file is automatically generated by GRIT. Do not edit.\n'
          '#include "%s"\n'
          '// All strings are UTF-8\n' % resource_header)
def Format(root, lang='en', output_dir='.'):
  """Outputs a C switch statement representing the string table.

  Args:
    root: The grd root node.
    lang: Language code to translate the messages into.
    output_dir: Directory the output lands in; used to compute the relative
        #include path emitted by _FormatHeader.

  Yields:
    Chunks of generated C source text.
  """
  from grit.node import message
  assert isinstance(lang, six.string_types)
  yield _FormatHeader(root, output_dir)
  yield 'const char* GetString(int id) {\n switch (id) {'
  # Emit one 'case' per active message node, in tree order.
  for item in root.ActiveDescendants():
    with item:
      if isinstance(item, message.MessageNode):
        yield _FormatMessage(item, lang)
  yield '\n default:\n return 0;\n }\n}\n'
def _HexToOct(match):
  """Returns the octal form of a matched run of '\\xHH' escapes.

  `match` must expose an "escaped_backslashes" group (kept verbatim as a
  prefix) and a "hex" group made of one or more 4-character '\\xHH' units.
  """
  hex_run = match.group("hex")
  # Each unit is '\xHH'; characters [i+2:i+4] are the two hex digits.
  octal_escapes = [
      '\\%03o' % int(hex_run[i + 2:i + 4], 16)
      for i in range(0, len(hex_run), 4)
  ]
  return match.group("escaped_backslashes") + ''.join(octal_escapes)
def _FormatMessage(item, lang):
  """Format a single <message> element as a C 'case' returning the string.

  Args:
    item: A MessageNode whose translation is emitted.
    lang: Language code to translate into.

  Returns:
    A 'case <ID>: return "...";' fragment with the message escaped for use
    inside a C string literal.
  """
  message = item.ws_at_start + item.Translate(lang) + item.ws_at_end
  # Output message with non-ascii chars escaped as octal numbers C's grammar
  # allows escaped hexadecimal numbers to be infinite, but octal is always of
  # the form \OOO. Python 3 doesn't support string-escape, so we have to jump
  # through some hoops here via codecs.escape_encode.
  # This basically does:
  #   - message - the starting string
  #   - message.encode(...) - convert to bytes
  #   - codecs.escape_encode(...) - convert non-ASCII bytes to \x## escapes
  #   - (...).decode() - convert bytes back to a string
  message = codecs.escape_encode(message.encode('utf-8'))[0].decode('utf-8')
  # an escaped char is (\xHH)+ but only if the initial
  # backslash is not escaped.
  not_a_backslash = r"(^|[^\\])"  # beginning of line or a non-backslash char
  escaped_backslashes = not_a_backslash + r"(\\\\)*"
  hex_digits = r"((\\x)[0-9a-f]{2})+"
  two_digit_hex_num = re.compile(
      r"(?P<escaped_backslashes>%s)(?P<hex>%s)"
      % (escaped_backslashes, hex_digits))
  # Rewrite every \xHH run to \OOO octal escapes (see _HexToOct).
  message = two_digit_hex_num.sub(_HexToOct, message)
  # unescape \ (convert \\ back to \)
  message = message.replace('\\\\', '\\')
  # Escape embedded double quotes, then fold line breaks into literal \n.
  # NOTE: the order matters - quotes must be escaped after the backslash
  # unescaping above, or their escapes would be stripped.
  message = message.replace('"', '\\"')
  message = util.LINEBREAKS.sub(r'\\n', message)
  name_attr = item.GetTextualIds()[0]
  return '\n case %s:\n return "%s";' % (name_attr, message)

Просмотреть файл

@ -0,0 +1,81 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for c_format.py.
"""
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit import util
from grit.tool import build
class CFormatUnittest(unittest.TestCase):
  """End-to-end check of the c_format formatter.

  The grd below exercises plain text, quotes/apostrophes, line-break
  folding, and octal escaping of non-ASCII and pre-escaped byte sequences.
  """

  def testMessages(self):
    root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_QUESTIONS">Do you want to play questions?</message>
<message name="IDS_QUOTES">
"What's in a name, <ph name="NAME">%s<ex>Brandon</ex></ph>?"
</message>
<message name="IDS_LINE_BREAKS">
Was that rhetoric?
No.
Statement. Two all. Game point.
</message>
<message name="IDS_NON_ASCII">
\u00f5\\xc2\\xa4\\\u00a4\\\\xc3\\xb5\u4924
</message>
</messages>
""")
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('c_format', 'en'), buf)
    # Comments and blank lines are stripped before comparison.
    output = util.StripBlankLinesAndComments(buf.getvalue())
    self.assertEqual(u"""\
#include "resource.h"
const char* GetString(int id) {
switch (id) {
case IDS_QUESTIONS:
return "Do you want to play questions?";
case IDS_QUOTES:
return "\\"What\\'s in a name, %s?\\"";
case IDS_LINE_BREAKS:
return "Was that rhetoric?\\nNo.\\nStatement. Two all. Game point.";
case IDS_NON_ASCII:
return "\\303\\265\\xc2\\xa4\\\\302\\244\\\\xc3\\xb5\\344\\244\\244";
default:
return 0;
}
}""", output)
class DummyOutput(object):
  """Test double standing in for a grd <output> node.

  Provides exactly the three accessors RcBuilder asks an output node for.
  """

  def __init__(self, type, language):
    (self.type, self.language) = (type, language)

  def GetType(self):
    # The formatter name, e.g. 'c_format'.
    return self.type

  def GetLanguage(self):
    # The translation language requested for this output.
    return self.language

  def GetOutputFilename(self):
    # Placeholder only; nothing is ever written to it.
    return 'hello.gif'
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

Просмотреть файл

@ -0,0 +1,59 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Formats as a .json file that can be used to localize Google Chrome
extensions."""
from __future__ import print_function
from json import JSONEncoder
from grit import constants
from grit.node import message
def Format(root, lang='en', output_dir='.'):
  """Format the messages as a Chrome-extension i18n messages.json stream.

  Yields chunks of JSON text; untranslated messages are skipped when the
  grd allows falling back to English.
  """
  yield '{'
  encoder = JSONEncoder(ensure_ascii=False)
  entry_template = '"%s":{"message":%s%s}'
  placeholder_template = '"%i":{"content":"$%i"}'
  is_first_entry = True
  for child in root.ActiveDescendants():
    if not isinstance(child, message.MessageNode):
      continue
    # Strip the conventional resource prefix from the JSON key.
    msg_id = child.attrs['name']
    if msg_id.startswith(('IDR_', 'IDS_')):
      msg_id = msg_id[4:]
    translation_missing = child.GetCliques()[0].clique.get(lang) is None
    if (child.ShouldFallbackToEnglish() and translation_missing and
        lang != constants.FAKE_BIDI):
      # Skip the string if it's not translated. Chrome will fallback
      # to English automatically.
      continue
    loc_message = encoder.encode(
        child.ws_at_start + child.Translate(lang) + child.ws_at_end)
    # Replace $n place-holders with $n$ and add an appropriate "placeholders"
    # entry. Note that chrome.i18n.getMessage only supports 9 placeholders:
    # https://developer.chrome.com/extensions/i18n#method-getMessage
    placeholder_entries = []
    for n in range(1, 10):
      if '$%d' % n not in loc_message:
        break
      loc_message = loc_message.replace('$%d' % n, '$%d$' % n)
      placeholder_entries.append(placeholder_template % (n, n))
    if not is_first_entry:
      yield ','
    is_first_entry = False
    placeholders = ''
    if placeholder_entries:
      placeholders = ',"placeholders":{%s}' % ','.join(placeholder_entries)
    yield entry_template % (msg_id, loc_message, placeholders)
  yield '}'

Просмотреть файл

@ -0,0 +1,190 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for chrome_messages_json.py.
"""
from __future__ import print_function
import json
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit import grd_reader
from grit import util
from grit.tool import build
class ChromeMessagesJsonFormatUnittest(unittest.TestCase):
  """End-to-end tests of the chrome_messages_json formatter."""

  # The default unittest diff limit is too low for our unittests.
  # Allow the framework to show the full diff output all the time.
  maxDiff = None

  def testMessages(self):
    # Covers placeholder rewriting, whitespace markers ('''), double quotes
    # and backslashes. Output is compared via json.loads, so formatting of
    # the expected literal is irrelevant.
    root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_SIMPLE_MESSAGE">
Simple message.
</message>
<message name="IDS_QUOTES">
element\u2019s \u201c<ph name="NAME">%s<ex>name</ex></ph>\u201d attribute
</message>
<message name="IDS_PLACEHOLDERS">
<ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
</message>
<message name="IDS_PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
<message name="IDS_STARTS_WITH_SPACE">
''' (<ph name="COUNT">%d<ex>2</ex></ph>)
</message>
<message name="IDS_ENDS_WITH_SPACE">
(<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_SPACE_AT_BOTH_ENDS">
''' (<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_DOUBLE_QUOTES">
A "double quoted" message.
</message>
<message name="IDS_BACKSLASH">
\\
</message>
</messages>
""")
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
                                buf)
    output = buf.getvalue()
    test = u"""
{
"SIMPLE_MESSAGE": {
"message": "Simple message."
},
"QUOTES": {
"message": "element\u2019s \u201c%s\u201d attribute"
},
"PLACEHOLDERS": {
"message": "%1$d error, %2$d warning"
},
"PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE": {
"message": "$1$test$2$",
"placeholders": {
"1": {
"content": "$1"
},
"2": {
"content": "$2"
}
}
},
"STARTS_WITH_SPACE": {
"message": " (%d)"
},
"ENDS_WITH_SPACE": {
"message": "(%d) "
},
"SPACE_AT_BOTH_ENDS": {
"message": " (%d) "
},
"DOUBLE_QUOTES": {
"message": "A \\"double quoted\\" message."
},
"BACKSLASH": {
"message": "\\\\"
}
}
"""
    self.assertEqual(json.loads(test), json.loads(output))

  def testTranslations(self):
    # Requests 'fr'; ParseGrdForUnittest supplies pseudo-translations.
    root = util.ParseGrdForUnittest("""
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
""")
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'),
                                buf)
    output = buf.getvalue()
    test = u"""
{
"ID_HELLO": {
"message": "H\u00e9P\u00e9ll\u00f4P\u00f4!"
},
"ID_HELLO_USER": {
"message": "H\u00e9P\u00e9ll\u00f4P\u00f4 %s"
}
}
"""
    self.assertEqual(json.loads(test), json.loads(output))

  def testSkipMissingTranslations(self):
    # With fallback_to_english="true", untranslated messages are omitted
    # entirely, so the French output is an empty JSON object.
    grd = """<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" current_release="3" source_lang_id="en"
base_dir="%s">
<outputs>
</outputs>
<release seq="3" allow_pseudo="False">
<messages fallback_to_english="true">
<message name="ID_HELLO_NO_TRANSLATION">Hello not translated</message>
</messages>
</release>
</grit>"""
    root = grd_reader.Parse(StringIO(grd), dir=".")
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'),
                                buf)
    output = buf.getvalue()
    test = u'{}'
    self.assertEqual(test, output)

  def testVerifyMinification(self):
    # The emitted JSON must be compact (no insignificant whitespace).
    root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
</messages>
""")
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
                                buf)
    output = buf.getvalue()
    test = (u'{"IDS":{"message":"$1$test$2$","placeholders":'
            u'{"1":{"content":"$1"},"2":{"content":"$2"}}}}')
    self.assertEqual(test, output)
class DummyOutput(object):
  """Fake <output> node: remembers a type and a language, answers the three
  accessors the build pipeline uses, and names a file that never exists."""

  def __init__(self, type, language):
    self.type = type
    self.language = language

  def GetType(self):
    """Formatter type, e.g. 'chrome_messages_json'."""
    return self.type

  def GetLanguage(self):
    """Language code requested for this output."""
    return self.language

  def GetOutputFilename(self):
    """Fixed placeholder filename."""
    return 'hello.gif'
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

321
third_party/libwebrtc/tools/grit/grit/format/data_pack.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,321 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Support for formatting a data pack file used for platform agnostic resource
files.
"""
from __future__ import print_function
import collections
import os
import struct
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import six
from grit import util
from grit.node import include
from grit.node import message
from grit.node import structure
# Version of the .pak format this module writes (v4 is still readable).
PACK_FILE_VERSION = 5
# Encoding codes stored in the pak header.
BINARY, UTF8, UTF16 = range(3)

# One parsed line of a .pak.info file: textual id, numeric id, source path.
GrdInfoItem = collections.namedtuple('GrdInfoItem',
                                     ['textual_id', 'id', 'path'])
class WrongFileVersion(Exception):
  """Raised when a pak file's version header is not one this reader knows."""
  pass
class CorruptDataPack(Exception):
  """Raised for malformed data pack files.

  NOTE(review): not raised anywhere in this module's visible code; kept for
  API compatibility with callers that catch it.
  """
  pass
class DataPackSizes(object):
  """Byte sizes of each section of a .pak file, in file order."""

  def __init__(self, header, id_table, alias_table, data):
    # Attribute order matters: __repr__ and iteration follow it.
    self.header = header
    self.id_table = id_table
    self.alias_table = alias_table
    self.data = data

  @property
  def total(self):
    """Total file size: the sum of all section sizes."""
    return sum(self.__dict__.values())

  def __iter__(self):
    # Yields (section_name, size) pairs in file order.
    for name in ('header', 'id_table', 'alias_table', 'data'):
      yield (name, getattr(self, name))

  def __eq__(self, other):
    return self.__dict__ == other.__dict__

  def __repr__(self):
    return '%s%r' % (type(self).__name__, self.__dict__)
class DataPackContents(object):
  """Parsed, in-memory representation of a .pak file."""

  def __init__(self, resources, encoding, version, aliases, sizes):
    self.resources = resources  # Map of resource_id -> str.
    self.encoding = encoding    # Encoding (int): BINARY, UTF8 or UTF16.
    self.version = version      # Pak file format version (int).
    self.aliases = aliases      # Map of resource_id -> canonical_resource_id.
    self.sizes = sizes          # DataPackSizes instance.
def Format(root, lang='en', output_dir='.'):
  """Writes out the data pack file format (platform agnostic resource file).

  Also records one '<name>,<id>,<source>' line per emitted resource on
  root.info, which callers later turn into the .pak.info file.
  """
  id_map = root.GetIdMap()
  resources = {}
  root.info = []
  packable_types = (include.IncludeNode, message.MessageNode,
                    structure.StructureNode)
  for node in root.ActiveDescendants():
    with node:
      if not isinstance(node, packable_types):
        continue
      value = node.GetDataPackValue(lang, util.BINARY)
      if value is None:
        continue
      resource_id = id_map[node.GetTextualIds()[0]]
      resources[resource_id] = value
      root.info.append('{},{},{}'.format(
          node.attrs.get('name'), resource_id, node.source))
  return WriteDataPackToString(resources, UTF8)
def ReadDataPack(input_file):
  """Reads a .pak file from disk and returns its parsed DataPackContents."""
  return ReadDataPackFromString(util.ReadFile(input_file, util.BINARY))
def ReadDataPackFromString(data):
  """Reads a data pack file and returns a dictionary.

  Args:
    data: Raw bytes of a version 4 or version 5 .pak file.

  Returns:
    A DataPackContents instance.

  Raises:
    WrongFileVersion: if the version field is neither 4 nor 5.
  """
  # Read the header.
  version = struct.unpack('<I', data[:4])[0]
  if version == 4:
    resource_count, encoding = struct.unpack('<IB', data[4:9])
    alias_count = 0
    header_size = 9
  elif version == 5:
    # Version 5 adds the alias table and pads the encoding byte to 4 bytes.
    encoding, resource_count, alias_count = struct.unpack('<BxxxHH', data[4:12])
    header_size = 12
  else:
    raise WrongFileVersion('Found version: ' + str(version))

  resources = {}
  kIndexEntrySize = 2 + 4  # Each entry is a uint16 and a uint32.
  def entry_at_index(idx):
    offset = header_size + idx * kIndexEntrySize
    return struct.unpack('<HI', data[offset:offset + kIndexEntrySize])

  # A resource's payload runs from its offset to the next entry's offset;
  # the table carries one extra sentinel entry so the last length resolves.
  prev_resource_id, prev_offset = entry_at_index(0)
  for i in range(1, resource_count + 1):
    resource_id, offset = entry_at_index(i)
    resources[prev_resource_id] = data[prev_offset:offset]
    prev_resource_id, prev_offset = resource_id, offset

  id_table_size = (resource_count + 1) * kIndexEntrySize
  # Read the alias table.
  kAliasEntrySize = 2 + 2  # uint16, uint16
  def alias_at_index(idx):
    offset = header_size + id_table_size + idx * kAliasEntrySize
    return struct.unpack('<HH', data[offset:offset + kAliasEntrySize])

  # Aliases map a resource id to an *index* into the main table; resolve
  # each to its canonical id and share the canonical entry's bytes.
  aliases = {}
  for i in range(alias_count):
    resource_id, index = alias_at_index(i)
    aliased_id = entry_at_index(index)[0]
    aliases[resource_id] = aliased_id
    resources[resource_id] = resources[aliased_id]

  alias_table_size = kAliasEntrySize * alias_count
  sizes = DataPackSizes(
      header_size, id_table_size, alias_table_size,
      len(data) - header_size - id_table_size - alias_table_size)
  # Sanity check: section sizes must account for every byte of the file.
  assert sizes.total == len(data), 'original={} computed={}'.format(
      len(data), sizes.total)
  return DataPackContents(resources, encoding, version, aliases, sizes)
def WriteDataPackToString(resources, encoding):
  """Returns bytes with a map of id=>data in the data pack format.

  Duplicate payloads are stored once; the extra ids become alias-table
  entries pointing at the canonical resource's main-table index.

  Args:
    resources: Map of resource id -> str/bytes payload.
    encoding: One of BINARY, UTF8, UTF16.
  """
  ret = []

  # Compute alias map.
  resource_ids = sorted(resources)
  # Use reversed() so that for duplicates lower IDs clobber higher ones.
  id_by_data = {resources[k]: k for k in reversed(resource_ids)}
  # Map of resource_id -> resource_id, where value < key.
  alias_map = {k: id_by_data[v] for k, v in resources.items()
               if id_by_data[v] != k}

  # Write file header.
  resource_count = len(resources) - len(alias_map)
  # Padding bytes added for alignment.
  ret.append(struct.pack('<IBxxxHH', PACK_FILE_VERSION, encoding,
                         resource_count, len(alias_map)))
  HEADER_LENGTH = 4 + 4 + 2 + 2

  # Each main table entry is: uint16 + uint32 (and an extra entry at the end).
  # Each alias table entry is: uint16 + uint16.
  data_offset = HEADER_LENGTH + (resource_count + 1) * 6 + len(alias_map) * 4

  # Write main table (ascending id order, aliases excluded).
  index_by_id = {}
  deduped_data = []
  index = 0
  for resource_id in resource_ids:
    if resource_id in alias_map:
      continue
    data = resources[resource_id]
    if isinstance(data, six.text_type):
      data = data.encode('utf-8')
    index_by_id[resource_id] = index
    ret.append(struct.pack('<HI', resource_id, data_offset))
    data_offset += len(data)
    deduped_data.append(data)
    index += 1

  assert index == resource_count
  # Add an extra entry at the end (sentinel: gives the last payload's end).
  ret.append(struct.pack('<HI', 0, data_offset))

  # Write alias table (ascending id order).
  for resource_id in sorted(alias_map):
    index = index_by_id[alias_map[resource_id]]
    ret.append(struct.pack('<HH', resource_id, index))

  # Write data.
  ret.extend(deduped_data)
  return b''.join(ret)
def WriteDataPack(resources, output_file, encoding):
  """Serializes a map of id=>data and writes it to output_file as a .pak."""
  with open(output_file, 'wb') as out:
    out.write(WriteDataPackToString(resources, encoding))
def ReadGrdInfo(grd_file):
  """Parses '<grd_file>.info' into a map of numeric id -> GrdInfoItem."""
  with open(grd_file + '.info', 'rt') as info_file:
    items = [GrdInfoItem._make(line.strip().split(','))
             for line in info_file]
  return {int(item.id): item for item in items}
def RePack(output_file, input_files, whitelist_file=None,
           suppress_removed_key_output=False,
           output_info_filepath=None):
  """Write a new data pack file by combining input pack files.

  Args:
    output_file: path to the new data pack file.
    input_files: a list of paths to the data pack files to combine.
    whitelist_file: path to the file that contains the list of resource IDs
                    that should be kept in the output file or None to include
                    all resources.
    suppress_removed_key_output: allows the caller to suppress the output from
                                 RePackFromDataPackStrings.
    output_info_filepath: If not None, specify the output .info filepath;
                          defaults to '<output_file>.info'.

  Raises:
    KeyError: if there are duplicate keys or resource encoding is
              inconsistent.
  """
  input_data_packs = [ReadDataPack(filename) for filename in input_files]
  input_info_files = [filename + '.info' for filename in input_files]
  whitelist = None
  if whitelist_file:
    lines = util.ReadFile(whitelist_file, 'utf-8').strip().splitlines()
    if not lines:
      raise Exception('Whitelist file should not be empty')
    whitelist = set(int(x) for x in lines)
  inputs = [(p.resources, p.encoding) for p in input_data_packs]
  resources, encoding = RePackFromDataPackStrings(
      inputs, whitelist, suppress_removed_key_output)
  WriteDataPack(resources, output_file, encoding)
  # Concatenate the per-input .info files into the output .info file.
  if output_info_filepath is None:
    output_info_filepath = output_file + '.info'
  with open(output_info_filepath, 'w') as output_info_file:
    for filename in input_info_files:
      with open(filename, 'r') as info_file:
        output_info_file.writelines(info_file.readlines())
def RePackFromDataPackStrings(inputs, whitelist,
                              suppress_removed_key_output=False):
  """Combines all inputs into one.

  Args:
    inputs: a list of (resources_by_id, encoding) tuples to be combined.
    whitelist: a list of resource IDs that should be kept in the output string
               or None to include all resources.
    suppress_removed_key_output: Do not print removed keys.

  Returns:
    Returns (resources_by_id, encoding).

  Raises:
    KeyError: if there are duplicate keys or resource encoding is
              inconsistent.
  """
  combined = {}
  encoding = None
  for input_resources, input_encoding in inputs:
    # Make sure we have no dups.
    overlap = set(input_resources.keys()) & set(combined.keys())
    if overlap:
      raise KeyError('Duplicate keys: ' + str(list(overlap)))
    # Make sure encoding is consistent (BINARY is compatible with anything).
    if encoding in (None, BINARY):
      encoding = input_encoding
    elif input_encoding not in (BINARY, encoding):
      raise KeyError('Inconsistent encodings: ' + str(encoding) +
                     ' vs ' + str(input_encoding))
    if whitelist:
      combined.update({key: value
                       for key, value in input_resources.items()
                       if key in whitelist})
      if not suppress_removed_key_output:
        for key in input_resources.keys():
          if key not in whitelist:
            print('RePackFromDataPackStrings Removed Key:', key)
    else:
      combined.update(input_resources)
  # Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16
  if encoding is None:
    encoding = BINARY
  return combined, encoding
def main():
  """Smoke test: writes two sample .pak files to the current directory."""
  # Write a simple file.
  data = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
  WriteDataPack(data, 'datapack1.pak', UTF8)
  data2 = {1000: 'test', 5: 'five'}
  WriteDataPack(data2, 'datapack2.pak', UTF8)
  print('wrote datapack1 and datapack2 to current directory.')
# Allow running as a script to generate the sample pak files.
if __name__ == '__main__':
  main()

Просмотреть файл

@ -0,0 +1,102 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.data_pack'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit.format import data_pack
class FormatDataPackUnittest(unittest.TestCase):
  """Tests reading, writing and repacking of .pak data files."""

  def testReadDataPackV4(self):
    # v4 layout: version, entry count, encoding byte, then the index table.
    expected_data = (
        b'\x04\x00\x00\x00'  # header(version
        b'\x04\x00\x00\x00'  # no. entries,
        b'\x01'  # encoding)
        b'\x01\x00\x27\x00\x00\x00'  # index entry 1
        b'\x04\x00\x27\x00\x00\x00'  # index entry 4
        b'\x06\x00\x33\x00\x00\x00'  # index entry 6
        b'\x0a\x00\x3f\x00\x00\x00'  # index entry 10
        b'\x00\x00\x3f\x00\x00\x00'  # extra entry for the size of last
        b'this is id 4this is id 6')  # data
    expected_data_pack = data_pack.DataPackContents(
        {
            1: b'',
            4: b'this is id 4',
            6: b'this is id 6',
            10: b'',
        }, data_pack.UTF8, 4, {}, data_pack.DataPackSizes(9, 30, 0, 24))
    loaded = data_pack.ReadDataPackFromString(expected_data)
    self.assertDictEqual(expected_data_pack.__dict__, loaded.__dict__)

  def testReadWriteDataPackV5(self):
    # v5 adds the alias table: id 10 aliases the entry at index 1 (id 4).
    expected_data = (
        b'\x05\x00\x00\x00'  # version
        b'\x01\x00\x00\x00'  # encoding & padding
        b'\x03\x00'  # resource_count
        b'\x01\x00'  # alias_count
        b'\x01\x00\x28\x00\x00\x00'  # index entry 1
        b'\x04\x00\x28\x00\x00\x00'  # index entry 4
        b'\x06\x00\x34\x00\x00\x00'  # index entry 6
        b'\x00\x00\x40\x00\x00\x00'  # extra entry for the size of last
        b'\x0a\x00\x01\x00'  # alias table
        b'this is id 4this is id 6')  # data
    input_resources = {
        1: b'',
        4: b'this is id 4',
        6: b'this is id 6',
        10: b'this is id 4',
    }
    data = data_pack.WriteDataPackToString(input_resources, data_pack.UTF8)
    # assertEquals is a deprecated alias (removed in Python 3.12); use
    # assertEqual.
    self.assertEqual(data, expected_data)

    expected_data_pack = data_pack.DataPackContents({
        1: b'',
        4: input_resources[4],
        6: input_resources[6],
        10: input_resources[4],
    }, data_pack.UTF8, 5, {10: 4}, data_pack.DataPackSizes(12, 24, 4, 24))
    loaded = data_pack.ReadDataPackFromString(expected_data)
    self.assertDictEqual(expected_data_pack.__dict__, loaded.__dict__)

  def testRePackUnittest(self):
    expected_with_whitelist = {
        1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let',
        30: 'you down', 40: 'Never', 50: 'gonna run around and',
        60: 'desert you'}
    expected_without_whitelist = {
        1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let', 65: 'Close',
        30: 'you down', 40: 'Never', 50: 'gonna run around and', 4: 'click',
        60: 'desert you', 6: 'chirr', 32: 'oops, try again', 70: 'Awww, snap!'}
    inputs = [{1: 'Never gonna', 4: 'click', 6: 'chirr', 10: 'give you up'},
              {20: 'Never gonna let', 30: 'you down', 32: 'oops, try again'},
              {40: 'Never', 50: 'gonna run around and', 60: 'desert you'},
              {65: 'Close', 70: 'Awww, snap!'}]
    whitelist = [1, 10, 20, 30, 40, 50, 60]
    inputs = [(i, data_pack.UTF8) for i in inputs]

    # RePack using whitelist
    output, _ = data_pack.RePackFromDataPackStrings(
        inputs, whitelist, suppress_removed_key_output=True)
    self.assertDictEqual(expected_with_whitelist, output,
                         'Incorrect resource output')

    # RePack a None whitelist
    output, _ = data_pack.RePackFromDataPackStrings(
        inputs, None, suppress_removed_key_output=True)
    self.assertDictEqual(expected_without_whitelist, output,
                         'Incorrect resource output')
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

Просмотреть файл

@ -0,0 +1,144 @@
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A tool to generate a predetermined resource ids file that can be used as an
input to grit via the -p option. This is meant to be run manually every once in
a while and its output checked in. See tools/gritsettings/README.md for details.
"""
from __future__ import print_function
import os
import re
import sys
# Regular expression for parsing the #define macro format. Matches both the
# version of the macro with whitelist support and the one without. For example,
# Without generate whitelist flag:
#   #define IDS_FOO_MESSAGE 1234
# With generate whitelist flag:
#   #define IDS_FOO_MESSAGE (::ui::WhitelistedResource<1234>(), 1234)
RESOURCE_EXTRACT_REGEX = re.compile(r'^#define (\S*).* (\d+)\)?$', re.MULTILINE)

# Matches one "Resource=<id>" line of Chrome's --print-resource-ids output.
ORDERED_RESOURCE_IDS_REGEX = re.compile(r'^Resource=(\d*)$', re.MULTILINE)
def _GetResourceNameIdPairsIter(string_to_scan):
  """Gets an iterator of the resource name and id pairs of the given string.

  Scans the input string for lines of the form "#define NAME ID" and returns
  an iterator over all matching (NAME, ID) pairs.

  Args:
    string_to_scan: The input string to scan.

  Returns:
    An iterator of (name, id) tuples.
  """
  return (m.group(1, 2)
          for m in RESOURCE_EXTRACT_REGEX.finditer(string_to_scan))
def _ReadOrderedResourceIds(path):
  """Reads ordered resource ids from the given file.

  The resources are expected to be of the format produced by running Chrome
  with the --print-resource-ids command line.

  Args:
    path: File path to read resource ids from.

  Returns:
    An array of ordered resource ids.
  """
  with open(path, "r") as f:
    contents = f.read()
  return [int(m.group(1))
          for m in ORDERED_RESOURCE_IDS_REGEX.finditer(contents)]
def GenerateResourceMapping(original_resources, ordered_resource_ids):
  """Generates a resource mapping from the ordered ids and the original mapping.

  The returned dict will assign new ids to ordered_resource_ids numerically
  increasing from 101.

  Args:
    original_resources: A dict of original resource ids to resource names.
    ordered_resource_ids: An array of ordered resource ids.

  Returns:
    A dict of resource ids to resource names.
  """
  # 101 is used as the starting value since other parts of GRIT require it to
  # be the minimum (e.g. rc_header.py) based on Windows resource numbering.
  return {new_id: original_resources[old_id]
          for new_id, old_id in enumerate(ordered_resource_ids, start=101)}
def ReadResourceIdsFromFile(file, original_resources):
  """Reads resource ids from a GRIT-produced header file.

  Args:
    file: File to a GRIT-produced header file to read from.
    original_resources: Dict of resource ids to resource names to add to.
  """
  contents = file.read()
  for name, res_id in _GetResourceNameIdPairsIter(contents):
    original_resources[int(res_id)] = name
def _ReadOriginalResourceIds(out_dir):
  """Reads resource ids from GRIT header files in the specified directory.

  Args:
    out_dir: A Chrome build output directory (e.g. out/gn) to scan.

  Returns:
    A dict of resource ids to resource names.
  """
  original_resources = {}
  grit_header_suffixes = ('_resources.h', '_settings.h', '_strings.h')
  for root, _, filenames in os.walk(out_dir + '/gen'):
    for filename in filenames:
      if not filename.endswith(grit_header_suffixes):
        continue
      with open(os.path.join(root, filename), "r") as f:
        ReadResourceIdsFromFile(f, original_resources)
  return original_resources
def _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir):
  """Generates a predetermined ids file on stdout.

  Prints one "<resource_name> <id>" line per resource, sorted by new id.

  Args:
    ordered_resources_file: File path to read ordered resource ids from.
    out_dir: A Chrome build output directory (e.g. out/gn) to scan.
  """
  mapping = GenerateResourceMapping(
      _ReadOriginalResourceIds(out_dir),
      _ReadOrderedResourceIds(ordered_resources_file))
  for res_id in sorted(mapping):
    print(mapping[res_id], res_id)
def main(argv):
  """Command-line entry point.

  Expects exactly two arguments: <ordered_resources_file> <out_dir>;
  exits with status 1 (after printing usage) otherwise.
  """
  if len(argv) != 2:
    print("usage: gen_predetermined_ids.py <ordered_resources_file> <out_dir>")
    sys.exit(1)
  _GeneratePredeterminedIdsFile(argv[0], argv[1])
# Allow running as a script: gen_predetermined_ids.py <ordered_file> <out_dir>.
if '__main__' == __name__:
  main(sys.argv[1:])

Просмотреть файл

@ -0,0 +1,46 @@
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the gen_predetermined_ids module.'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.format import gen_predetermined_ids
class GenPredeterminedIdsUnittest(unittest.TestCase):
  """Unit tests for the pure helpers in gen_predetermined_ids."""

  def testGenerateResourceMapping(self):
    # New ids are assigned from 101 upwards, following ordered_resource_ids.
    original_resources = {200: 'A', 201: 'B', 300: 'C', 350: 'D', 370: 'E'}
    ordered_resource_ids = [300, 201, 370]
    mapping = gen_predetermined_ids.GenerateResourceMapping(
        original_resources, ordered_resource_ids)
    self.assertEqual({101: 'C', 102: 'B', 103: 'E'}, mapping)

  def testReadResourceIdsFromFile(self):
    # Covers both the plain "#define NAME ID" form and the whitelisted
    # "(::ui::WhitelistedResource<ID>(), ID)" form.
    f = StringIO('''
// This file is automatically generated by GRIT. Do not edit.
#pragma once
#define IDS_BOOKMARKS_NO_ITEMS 12500
#define IDS_BOOKMARK_BAR_IMPORT_LINK (::ui::WhitelistedResource<12501>(), 12501)
#define IDS_BOOKMARK_X (::ui::WhitelistedResource<12502>(), 12502)
''')
    resources = {}
    gen_predetermined_ids.ReadResourceIdsFromFile(f, resources)
    # The expected names must match the #define names above; the previous
    # expectation used unrelated IDS_BOOKMARKS_OPEN_ALL* names, so the
    # assertion could never pass.
    self.assertEqual({12500: 'IDS_BOOKMARKS_NO_ITEMS',
                      12501: 'IDS_BOOKMARK_BAR_IMPORT_LINK',
                      12502: 'IDS_BOOKMARK_X'}, resources)
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

46
third_party/libwebrtc/tools/grit/grit/format/gzip_string.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,46 @@
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides gzip utilities for strings.
"""
from __future__ import print_function
import gzip
import io
import subprocess
def GzipStringRsyncable(data):
  """Gzip-compresses `data` via the host system's `gzip --rsyncable`.

  --rsyncable makes updates much smaller: the file is compressed in small
  independent chunks, so a change to one part of the input only perturbs the
  corresponding part of the output instead of the whole compressed stream.

  Raises:
    subprocess.CalledProcessError: if the gzip child process fails.
  """
  command = ['gzip', '--stdout', '--rsyncable', '--best', '--no-name']
  gzip_proc = subprocess.Popen(command,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
  compressed, stderr = gzip_proc.communicate(data)
  if gzip_proc.returncode != 0:
    raise subprocess.CalledProcessError(gzip_proc.returncode, 'gzip', stderr)
  return compressed
def GzipString(data):
  """Gzip-compresses `data` with Python's built-in gzip module.

  Used where the host gzip (and its --rsyncable option) is unavailable,
  e.g. Windows and macOS. Compared to GzipStringRsyncable, the primary
  difference is larger updates whenever a compressed resource changes.
  mtime=0 keeps the output byte-stable across runs.
  """
  buffer = io.BytesIO()
  with gzip.GzipFile(mode='wb', compresslevel=9, fileobj=buffer,
                     mtime=0) as compressor:
    compressor.write(data)
  return buffer.getvalue()

Просмотреть файл

@ -0,0 +1,65 @@
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.gzip_string'''
from __future__ import print_function
import gzip
import io
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit.format import gzip_string
class FormatGzipStringUnittest(unittest.TestCase):
  """Tests for gzip_string's two compression helpers.

  Fixes relative to the original:
    - sys.platform is 'linux2' on Python 2 but 'linux' on Python 3, so the
      old `== 'linux2'` check silently skipped the rsyncable test on
      Python 3; match the prefix instead.
    - self.failUnless is a deprecated alias removed in Python 3.12; use
      assertEqual (which also gives better failure messages).
  """

  def testGzipStringRsyncable(self):
    # Can only test the rsyncable version on platforms which support
    # rsyncable, which at the moment is Linux.
    if sys.platform.startswith('linux'):
      header_begin = b'\x1f\x8b'  # gzip magic number (first two bytes).
      input = (b'TEST STRING STARTING NOW'
               b'continuing'
               b'<even more>'
               b'<finished NOW>')
      compressed = gzip_string.GzipStringRsyncable(input)
      self.assertEqual(header_begin, compressed[:2])
      compressed_file = io.BytesIO()
      compressed_file.write(compressed)
      compressed_file.seek(0)
      # Round-trip: decompressing must recover the original bytes.
      with gzip.GzipFile(mode='rb', fileobj=compressed_file) as f:
        output = f.read()
      self.assertEqual(output, input)

  def testGzipString(self):
    header_begin = b'\x1f\x8b'  # gzip magic number (first two bytes).
    input = (b'TEST STRING STARTING NOW'
             b'continuing'
             b'<even more>'
             b'<finished NOW>')
    compressed = gzip_string.GzipString(input)
    self.assertEqual(header_begin, compressed[:2])
    compressed_file = io.BytesIO()
    compressed_file.write(compressed)
    compressed_file.seek(0)
    # Round-trip: decompressing must recover the original bytes.
    with gzip.GzipFile(mode='rb', fileobj=compressed_file) as f:
      output = f.read()
    self.assertEqual(output, input)
# Allow running this suite directly: python gzip_string_unittest.py
if __name__ == '__main__':
  unittest.main()

602
third_party/libwebrtc/tools/grit/grit/format/html_inline.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,602 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Flattens a HTML file by inlining its external resources.
This is a small script that takes a HTML file, looks for src attributes
and inlines the specified file, producing one HTML file with no external
dependencies. It recursively inlines the included files.
"""
from __future__ import print_function
import os
import re
import sys
import base64
import mimetypes
from grit import lazy_re
from grit import util
from grit.format import minifier
# There is a python bug that makes mimetypes crash if the Windows
# registry contains non-Latin keys ( http://bugs.python.org/issue9291
# ). Initing manually and blocking external mime-type databases will
# prevent that bug and if we add svg manually, it will still give us
# the data we need.
mimetypes.init([])
mimetypes.add_type('image/svg+xml', '.svg')
# webm video type is not always available if mimetype package is outdated.
mimetypes.add_type('video/webm', '.webm')

# Distribution used when the DIST_ENV_VAR environment variable is unset.
DIST_DEFAULT = 'chromium'
# Environment variable consulted by GetDistribution().
DIST_ENV_VAR = 'CHROMIUM_BUILD'
# Placeholder in file names that gets replaced by the current distribution.
DIST_SUBSTR = '%DISTRIBUTION%'

# Matches beginning of an "if" block.
_BEGIN_IF_BLOCK = lazy_re.compile(
    r'<if [^>]*?expr=("(?P<expr1>[^">]*)"|\'(?P<expr2>[^\'>]*)\')[^>]*?>')

# Matches ending of an "if" block.
_END_IF_BLOCK = lazy_re.compile(r'</if>')

# Used by DoInline to replace various links with inline content.
_STYLESHEET_RE = lazy_re.compile(
    r'<link rel="stylesheet"[^>]+?href="(?P<filename>[^"]*)".*?>(\s*</link>)?',
    re.DOTALL)
# Matches <include src="..."> (optionally preceded by a "// " comment
# marker so it also works inside JS files).
_INCLUDE_RE = lazy_re.compile(
    r'(?P<comment>\/\/ )?<include[^>]+?'
    r'src=("(?P<file1>[^">]*)"|\'(?P<file2>[^\'>]*)\').*?>(\s*</include>)?',
    re.DOTALL)
# Matches src="..." on any non-<script> tag; skips [[...]] / {{...}}
# template bindings.
_SRC_RE = lazy_re.compile(
    r'<(?!script)(?:[^>]+?\s)src="(?!\[\[|{{)(?P<filename>[^"\']*)"',
    re.MULTILINE)
# This re matches '<img srcset="..."' or '<source srcset="..."'
_SRCSET_RE = lazy_re.compile(
    r'<(img|source)\b(?:[^>]*?\s)srcset="(?!\[\[|{{|\$i18n{)'
    r'(?P<srcset>[^"\']*)"',
    re.MULTILINE)

# This re is for splitting srcset value string into "image candidate strings".
# Notes:
#  - HTML 5.2 states that URL cannot start or end with comma.
#  - the "descriptor" is either "width descriptor" or "pixel density
#    descriptor". The first one consists of "valid non-negative integer +
#    letter 'x'", the second one is formed of "positive valid floating-point
#    number + letter 'w'". As a reasonable compromise, we match a list of
#    characters that form both of them.
# Matches for example "img2.png 2x" or "img9.png 11E-2w".
_SRCSET_ENTRY_RE = lazy_re.compile(
    r'\s*(?P<url>[^,\s]\S+[^,\s])'
    r'(?:\s+(?P<descriptor>[\deE.-]+[wx]))?\s*'
    r'(?P<separator>,|$)',
    re.MULTILINE)

# Matches <link rel="icon" href="..."> (double-quoted href only, enforced
# by the backreference).
_ICON_RE = lazy_re.compile(
    r'<link rel="icon"\s(?:[^>]+?\s)?'
    r'href=(?P<quote>")(?P<filename>[^"\']*)\1',
    re.MULTILINE)
def GetDistribution():
  """Returns the distribution name used for %DISTRIBUTION% expansion.

  Reads the CHROMIUM_BUILD environment variable, falling back to the
  default ('chromium').  A value with a leading underscore (e.g.
  '_google') has the underscore stripped and is lower-cased.

  Returns:
    string
  """
  distribution = os.environ.get(DIST_ENV_VAR, DIST_DEFAULT)
  if len(distribution) > 1 and distribution[0] == '_':
    distribution = distribution[1:].lower()
  return distribution
def ConvertFileToDataURL(filename, base_path, distribution, inlined_files,
                         names_only):
  """Convert filename to inlined data URI.

  Takes a filename from either "src" or "srcset", and attempts to read the
  file at 'filename'. Returns data URI as string with given file inlined.
  If it finds DIST_SUBSTR string in file name, replaces it with distribution.
  If filename contains ':', it is considered a URL and not translated.

  Args:
    filename: filename string from either src or srcset attributes.
    base_path: path that to look for files in
    distribution: string that should replace DIST_SUBSTR
    inlined_files: The path of the opened file is added to this set.
    names_only: If true, the function will not read the file but just return "".
        It will still add the filename to |inlined_files|.

  Returns:
    string

  Raises:
    Exception: if the file's MIME type cannot be guessed from its name.
  """
  if filename.find(':') != -1:
    # filename is probably a URL, which we don't want to bother inlining
    return filename

  filename = filename.replace(DIST_SUBSTR, distribution)
  filepath = os.path.normpath(os.path.join(base_path, filename))
  inlined_files.add(filepath)

  if names_only:
    return ""

  mimetype = mimetypes.guess_type(filename)[0]
  if mimetype is None:
    # Fixed doubled article ("an an") present in the original message.
    raise Exception('%s is of an unknown type and '
                    'cannot be stored in a data url.' % filename)
  inline_data = base64.standard_b64encode(util.ReadFile(filepath, util.BINARY))

  return 'data:%s;base64,%s' % (mimetype, inline_data.decode('utf-8'))
def SrcInlineAsDataURL(
    src_match, base_path, distribution, inlined_files, names_only=False,
    filename_expansion_function=None):
  """regex replace function.

  Given a regex match for src="filename", reads the referenced file and
  returns the matched text with the filename replaced by a data: URI.
  DIST_SUBSTR occurrences in the filename are replaced by |distribution|.

  Args:
    src_match: regex match object with 'filename' named capturing group
    base_path: path that to look for files in
    distribution: string that should replace DIST_SUBSTR
    inlined_files: The path of the opened file is added to this set.
    names_only: If true, the file is not read; "" is returned, but the
        filename is still added to |inlined_files|.
    filename_expansion_function: optional function applied to the filename
        before it is read.

  Returns:
    string
  """
  raw_filename = src_match.group('filename')
  expanded = (filename_expansion_function(raw_filename)
              if filename_expansion_function else raw_filename)
  data_url = ConvertFileToDataURL(expanded, base_path, distribution,
                                  inlined_files, names_only)
  if not data_url:
    return data_url
  # Splice the data URI back into the matched text, keeping everything
  # around the original filename intact.
  whole = src_match.string
  prefix = whole[src_match.start():src_match.start('filename')]
  suffix = whole[src_match.end('filename'):src_match.end()]
  return prefix + data_url + suffix
def SrcsetInlineAsDataURL(
    srcset_match, base_path, distribution, inlined_files, names_only=False,
    filename_expansion_function=None):
  """regex replace function to inline files in srcset="..." attributes

  Takes a regex match for srcset="filename 1x, filename 2x, ...", attempts to
  read the files referenced by filenames and returns the srcset attribute with
  the files inlined as a data URI. If it finds DIST_SUBSTR string in file name,
  replaces it with distribution.

  Args:
    srcset_match: regex match object with 'srcset' named capturing group
    base_path: path that to look for files in
    distribution: string that should replace DIST_SUBSTR
    inlined_files: The path of each opened file is added to this set.
    names_only: If true, the function will not read the file but just return "".
        It will still add the filename to |inlined_files|.
    filename_expansion_function: optional function applied to each filename
        before it is read.

  Returns:
    string
  """
  srcset = srcset_match.group('srcset')

  if not srcset:
    # Nothing inside srcset=""; leave the match untouched.
    return srcset_match.group(0)

  # HTML 5.2 defines srcset as a list of "image candidate strings".
  # Each of them consists of URL and descriptor.
  # _SRCSET_ENTRY_RE splits srcset into a list of URLs, descriptors and
  # commas.
  # The descriptor part will be None if that optional regex didn't match.
  parts = _SRCSET_ENTRY_RE.split(srcset)
  if not parts:
    return srcset_match.group(0)

  # List of image candidate strings that will form new srcset="...".
  new_candidates = []

  # When iterating over split srcset we fill this parts of a single image
  # candidate string: [url, descriptor].
  candidate = [];

  # re.split with 3 capturing groups produces 4 items per match: the text
  # before the entry, the url, the descriptor (or None), and the separator
  # (',' or '' at end of line); the text after an entry is the same item as
  # the text before the next one, so we step by 4 and slice 5.
  for i in range(0, len(parts) - 1, 4):
    before, url, descriptor, separator, after = parts[i:i+5]
    # There must be a comma-separated next entry or this must be the last entry.
    assert separator == "," or (separator == "" and i == len(parts) - 5), (
        "Bad srcset format in {}".format(srcset_match.group(0)))
    # Both before and after the entry must be empty.
    assert before == after == "", (
        "Bad srcset format in {}".format(srcset_match.group(0)))
    if filename_expansion_function:
      filename = filename_expansion_function(url)
    else:
      filename = url
    data_url = ConvertFileToDataURL(filename, base_path, distribution,
                                    inlined_files, names_only)
    # data_url is "" in names_only mode; only emit candidates when we
    # actually inlined something.
    if data_url:
      candidate = [data_url]
      if descriptor:
        candidate.append(descriptor)
      new_candidates.append(" ".join(candidate))

  # Splice the rebuilt candidate list back into the matched attribute.
  prefix = srcset_match.string[srcset_match.start():
                               srcset_match.start('srcset')]
  suffix = srcset_match.string[srcset_match.end('srcset'):srcset_match.end()]
  return prefix + ','.join(new_candidates) + suffix
class InlinedData:
  """Value object returned by DoInline().

  Attributes:
    inlined_data: the flattened content (None in names-only mode).
    inlined_files: set of paths of every file that was inlined.
  """

  def __init__(self, inlined_data, inlined_files):
    # Keep both pieces so callers can use either the flattened content or
    # the dependency list.
    self.inlined_data = inlined_data
    self.inlined_files = inlined_files
def DoInline(
    input_filename, grd_node, allow_external_script=False,
    preprocess_only=False, names_only=False, strip_whitespace=False,
    rewrite_function=None, filename_expansion_function=None):
  """Helper function that inlines the resources in a specified file.

  Reads input_filename, finds all the src attributes and attempts to
  inline the files they are referring to, then returns the result and
  the set of inlined files.

  Args:
    input_filename: name of file to read in
    grd_node: html node from the grd file for this include tag
    allow_external_script: if True, <script src=...> references are left
        in place instead of being inlined.
    preprocess_only: Skip all HTML processing, only handle <if> and <include>.
    names_only: |nil| will be returned for the inlined contents (faster).
    strip_whitespace: remove whitespace and comments in the input files.
    rewrite_function: function(filepath, text, distribution) which will be
        called to rewrite html content before inlining images.
    filename_expansion_function: function(filename) which will be called to
        rewrite filenames before attempting to read them.

  Returns:
    an InlinedData holding the inlined data as a string and the set of
    filenames of all the inlined files
  """
  if filename_expansion_function:
    input_filename = filename_expansion_function(input_filename)
  input_filepath = os.path.dirname(input_filename)
  distribution = GetDistribution()

  # Keep track of all the files we inline.
  inlined_files = set()

  def SrcReplace(src_match, filepath=input_filepath,
                 inlined_files=inlined_files):
    """Helper function to provide SrcInlineAsDataURL with the base file path"""
    return SrcInlineAsDataURL(
        src_match, filepath, distribution, inlined_files, names_only=names_only,
        filename_expansion_function=filename_expansion_function)

  def SrcsetReplace(srcset_match, filepath=input_filepath,
                    inlined_files=inlined_files):
    """Helper function to provide SrcsetInlineAsDataURL with the base file
    path.
    """
    return SrcsetInlineAsDataURL(
        srcset_match, filepath, distribution, inlined_files,
        names_only=names_only,
        filename_expansion_function=filename_expansion_function)

  def GetFilepath(src_match, base_path = input_filepath):
    """Extracts the referenced path from a match, or None for URLs."""
    # The match uses either the 'file1' (double-quoted) or 'file2'
    # (single-quoted) group; pick whichever one matched.
    filename = [v for k, v in src_match.groupdict().items()
                if k.startswith('file') and v][0]

    if filename.find(':') != -1:
      # filename is probably a URL, which we don't want to bother inlining
      return None

    filename = filename.replace('%DISTRIBUTION%', distribution)
    if filename_expansion_function:
      filename = filename_expansion_function(filename)
    return os.path.normpath(os.path.join(base_path, filename))

  def IsConditionSatisfied(src_match):
    """Evaluates an <if expr="..."> condition against grd_node."""
    # Only one of expr1/expr2 matches (double- vs single-quoted attr).
    expr1 = src_match.group('expr1') or ''
    expr2 = src_match.group('expr2') or ''
    return grd_node is None or grd_node.EvaluateCondition(expr1 + expr2)

  def CheckConditionalElements(str):
    """Helper function to conditionally inline inner elements"""
    while True:
      begin_if = _BEGIN_IF_BLOCK.search(str)
      if begin_if is None:
        if _END_IF_BLOCK.search(str) is not None:
          raise Exception('Unmatched </if>')
        return str

      condition_satisfied = IsConditionSatisfied(begin_if)
      leading = str[0:begin_if.start()]
      content_start = begin_if.end()

      # Find matching "if" block end, tracking nesting depth in |count|.
      count = 1
      pos = begin_if.end()
      while True:
        end_if = _END_IF_BLOCK.search(str, pos)
        if end_if is None:
          raise Exception('Unmatched <if>')

        next_if = _BEGIN_IF_BLOCK.search(str, pos)
        if next_if is None or next_if.start() >= end_if.end():
          # No nested <if> before this </if>: close one level.
          count = count - 1
          if count == 0:
            break
          pos = end_if.end()
        else:
          # A nested <if> opens before the next </if>.
          count = count + 1
          pos = next_if.end()

      content = str[content_start:end_if.start()]
      trailing = str[end_if.end():]

      if condition_satisfied:
        # Keep the content (recursively processing nested <if>s inside it).
        str = leading + CheckConditionalElements(content) + trailing
      else:
        str = leading + trailing

  def InlineFileContents(src_match,
                         pattern,
                         inlined_files=inlined_files,
                         strip_whitespace=False):
    """Helper function to inline external files of various types"""
    filepath = GetFilepath(src_match)
    if filepath is None:
      return src_match.group(0)
    inlined_files.add(filepath)

    if names_only:
      inlined_files.update(GetResourceFilenames(
          filepath,
          grd_node,
          allow_external_script,
          rewrite_function,
          filename_expansion_function=filename_expansion_function))
      return ""
    # To recursively save inlined files, we need InlinedData instance returned
    # by DoInline.
    inlined_data_inst=DoInline(filepath, grd_node,
        allow_external_script=allow_external_script,
        preprocess_only=preprocess_only,
        strip_whitespace=strip_whitespace,
        filename_expansion_function=filename_expansion_function)

    inlined_files.update(inlined_data_inst.inlined_files)

    return pattern % inlined_data_inst.inlined_data;

  def InlineIncludeFiles(src_match):
    """Helper function to directly inline generic external files (without
    wrapping them with any kind of tags).
    """
    return InlineFileContents(src_match, '%s')

  def InlineScript(match):
    """Helper function to inline external script files"""
    attrs = (match.group('attrs1') + match.group('attrs2')).strip()
    if attrs:
      attrs = ' ' + attrs
    return InlineFileContents(match, '<script' + attrs + '>%s</script>',
                              strip_whitespace=True)

  def InlineCSSText(text, css_filepath):
    """Helper function that inlines external resources in CSS text"""
    filepath = os.path.dirname(css_filepath)
    # Allow custom modifications before inlining images.
    if rewrite_function:
      text = rewrite_function(filepath, text, distribution)
    text = InlineCSSImages(text, filepath)
    return InlineCSSImports(text, filepath)

  def InlineCSSFile(src_match, pattern, base_path=input_filepath):
    """Helper function to inline external CSS files.

    Args:
      src_match: A regular expression match with a named group named "filename".
      pattern: The pattern to replace with the contents of the CSS file.
      base_path: The base path to use for resolving the CSS file.

    Returns:
      The text that should replace the reference to the CSS file.
    """
    filepath = GetFilepath(src_match, base_path)
    if filepath is None:
      return src_match.group(0)

    # Even if names_only is set, the CSS file needs to be opened, because it
    # can link to images that need to be added to the file set.
    inlined_files.add(filepath)

    # Inline stylesheets included in this css file.
    text = _INCLUDE_RE.sub(InlineIncludeFiles, util.ReadFile(filepath, 'utf-8'))

    # When resolving CSS files we need to pass in the path so that relative URLs
    # can be resolved.
    return pattern % InlineCSSText(text, filepath)

  def GetUrlRegexString(postfix=''):
    """Helper function that returns a string for a regex that matches url('')
    but not url([[ ]]) or url({{ }}). Appends |postfix| to group names.
    """
    url_re = (r'url\((?!\[\[|{{)(?P<q%s>"|\'|)(?P<filename%s>[^"\'()]*)'
              r'(?P=q%s)\)')
    return url_re % (postfix, postfix, postfix)

  def InlineCSSImages(text, filepath=input_filepath):
    """Helper function that inlines external images in CSS backgrounds."""
    # Replace contents of url() for css attributes: content, background,
    # or *-image.
    property_re = r'(content|background|[\w-]*-image):[^;]*'
    # Replace group names to prevent duplicates when forming value_re.
    image_set_value_re = (r'image-set\(([ ]*' + GetUrlRegexString('2') +
                          r'[ ]*[0-9.]*x[ ]*(,[ ]*)?)+\)')
    value_re = '(%s|%s)' % (GetUrlRegexString(), image_set_value_re)
    css_re = property_re + value_re
    return re.sub(css_re, lambda m: InlineCSSUrls(m, filepath), text)

  def InlineCSSUrls(src_match, filepath=input_filepath):
    """Helper function that inlines each url on a CSS image rule match."""
    # Replace contents of url() references in matches.
    return re.sub(GetUrlRegexString(),
                  lambda m: SrcReplace(m, filepath),
                  src_match.group(0))

  def InlineCSSImports(text, filepath=input_filepath):
    """Helper function that inlines CSS files included via the @import
    directive.
    """
    return re.sub(r'@import\s+' + GetUrlRegexString() + r';',
                  lambda m: InlineCSSFile(m, '%s', filepath),
                  text)

  flat_text = util.ReadFile(input_filename, 'utf-8')

  # Check conditional elements, remove unsatisfied ones from the file. We do
  # this twice. The first pass is so that we don't even bother calling
  # InlineScript, InlineCSSFile and InlineIncludeFiles on text we're eventually
  # going to throw out anyway.
  flat_text = CheckConditionalElements(flat_text)

  flat_text = _INCLUDE_RE.sub(InlineIncludeFiles, flat_text)

  if not preprocess_only:
    if strip_whitespace:
      flat_text = minifier.Minify(flat_text.encode('utf-8'),
                                  input_filename).decode('utf-8')

    if not allow_external_script:
      # We need to inline css and js before we inline images so that image
      # references gets inlined in the css and js
      flat_text = re.sub(r'<script (?P<attrs1>.*?)src="(?P<filename>[^"\']*)"'
                         r'(?P<attrs2>.*?)></script>',
                         InlineScript,
                         flat_text)

    flat_text = _STYLESHEET_RE.sub(
        lambda m: InlineCSSFile(m, '<style>%s</style>'),
        flat_text)

  # Check conditional elements, second pass. This catches conditionals in any
  # of the text we just inlined.
  flat_text = CheckConditionalElements(flat_text)

  # Allow custom modifications before inlining images.
  if rewrite_function:
    flat_text = rewrite_function(input_filepath, flat_text, distribution)

  if not preprocess_only:
    flat_text = _SRC_RE.sub(SrcReplace, flat_text)
    flat_text = _SRCSET_RE.sub(SrcsetReplace, flat_text)

    # TODO(arv): Only do this inside <style> tags.
    flat_text = InlineCSSImages(flat_text)

    flat_text = _ICON_RE.sub(SrcReplace, flat_text)

  if names_only:
    flat_text = None  # Would contain garbage if the flag is set anyway.

  return InlinedData(flat_text, inlined_files)
def InlineToString(input_filename, grd_node, preprocess_only = False,
                   allow_external_script=False, strip_whitespace=False,
                   rewrite_function=None, filename_expansion_function=None):
  """Inlines the resources in a specified file and returns it as a string.

  Args:
    input_filename: name of file to read in
    grd_node: html node from the grd file for this include tag

  Returns:
    the inlined data as a string

  Raises:
    Exception: if a referenced file cannot be opened.
  """
  try:
    result = DoInline(
        input_filename,
        grd_node,
        preprocess_only=preprocess_only,
        allow_external_script=allow_external_script,
        strip_whitespace=strip_whitespace,
        rewrite_function=rewrite_function,
        filename_expansion_function=filename_expansion_function)
  except IOError as e:
    # Re-raise with enough context to locate the offending reference.
    raise Exception("Failed to open %s while trying to flatten %s. (%s)" %
                    (e.filename, input_filename, e.strerror))
  return result.inlined_data
def InlineToFile(input_filename, output_filename, grd_node):
  """Inlines the resources in a specified file and writes it.

  Reads input_filename, finds all the src attributes and attempts to
  inline the files they are referring to, then writes the result
  to output_filename.

  Args:
    input_filename: name of file to read in
    output_filename: name of file to be written to
    grd_node: html node from the grd file for this include tag

  Returns:
    None
  """
  inlined_data = InlineToString(input_filename, grd_node)
  if not isinstance(inlined_data, bytes):
    # InlineToString returns text; the file is opened in binary mode (to
    # avoid newline translation), so encode explicitly — writing str to a
    # 'wb' file raises TypeError on Python 3.
    inlined_data = inlined_data.encode('utf-8')
  with open(output_filename, 'wb') as out_file:
    out_file.write(inlined_data)
def GetResourceFilenames(filename,
                         grd_node,
                         allow_external_script=False,
                         rewrite_function=None,
                         filename_expansion_function=None):
  """For a grd file, returns a set of all the files that would be inline."""
  try:
    # names_only=True walks the reference graph without producing output,
    # which is all we need to collect the dependency set.
    result = DoInline(
        filename,
        grd_node,
        names_only=True,
        preprocess_only=False,
        allow_external_script=allow_external_script,
        strip_whitespace=False,
        rewrite_function=rewrite_function,
        filename_expansion_function=filename_expansion_function)
  except IOError as e:
    # Re-raise with enough context to locate the offending reference.
    raise Exception("Failed to open %s while trying to flatten %s. (%s)" %
                    (e.filename, filename, e.strerror))
  return result.inlined_files
def main():
  """Command-line entry point: html_inline.py <inputfile> <outputfile>."""
  if len(sys.argv) <= 2:
    # Not enough arguments: print usage instead of flattening.
    print("Flattens a HTML file by inlining its external resources.\n")
    print("html_inline.py inputfile outputfile")
    return
  InlineToFile(sys.argv[1], sys.argv[2], None)
# Support direct command-line invocation.
if __name__ == '__main__':
  main()

Просмотреть файл

@ -0,0 +1,927 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.html_inline'''
from __future__ import print_function
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.format import html_inline
class HtmlInlineUnittest(unittest.TestCase):
'''Unit tests for HtmlInline.'''
def testGetResourceFilenames(self):
'''Tests that all included files are returned by GetResourceFilenames.'''
files = {
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
<link rel="stylesheet"
href="really-long-long-long-long-long-test.css">
</head>
<body>
<include src='test.html'>
<include
src="really-long-long-long-long-long-test-file-omg-so-long.html">
</body>
</html>
''',
'test.html': '''
<include src="test2.html">
''',
'really-long-long-long-long-long-test-file-omg-so-long.html': '''
<!-- This really long named resource should be included. -->
''',
'test2.html': '''
<!-- This second level resource should also be included. -->
''',
'test.css': '''
.image {
background: url('test.png');
}
''',
'really-long-long-long-long-long-test.css': '''
a:hover {
font-weight: bold; /* Awesome effect is awesome! */
}
''',
'test.png': 'PNG DATA',
}
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'),
None)
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
tmp_dir.CleanUp()
def testUnmatchedEndIfBlock(self):
'''Tests that an unmatched </if> raises an exception.'''
files = {
'index.html': '''
<!DOCTYPE HTML>
<html>
<if expr="lang == 'fr'">
bonjour
</if>
<if expr='lang == "de"'>
hallo
</if>
</if>
</html>
''',
}
tmp_dir = util.TempDir(files)
with self.assertRaises(Exception) as cm:
html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'), None)
self.failUnlessEqual(str(cm.exception), 'Unmatched </if>')
tmp_dir.CleanUp()
def testCompressedJavaScript(self):
'''Tests that ".src=" doesn't treat as a tag.'''
files = {
'index.js': '''
if(i<j)a.src="hoge.png";
''',
}
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.js'),
None)
resources.add(tmp_dir.GetPath('index.js'))
self.failUnlessEqual(resources, source_resources)
tmp_dir.CleanUp()
def testInlineCSSImports(self):
'''Tests that @import directives in inlined CSS files are inlined too.
'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="css/test.css">
</head>
</html>
''',
'css/test.css': '''
@import url('test2.css');
blink {
display: none;
}
''',
'css/test2.css': '''
.image {
background: url('../images/test.png');
}
'''.strip(),
'images/test.png': 'PNG DATA'
}
expected_inlined = '''
<html>
<head>
<style>
.image {
background: url('data:image/png;base64,UE5HIERBVEE=');
}
blink {
display: none;
}
</style>
</head>
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(util.normpath(filename)))
result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
tmp_dir.CleanUp()
def testInlineIgnoresPolymerBindings(self):
'''Tests that polymer bindings are ignored when inlining.
'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<iron-icon src="[[icon]]"></iron-icon><!-- Should be ignored. -->
<iron-icon src="{{src}}"></iron-icon><!-- Also ignored. -->
<!-- [[image]] should be ignored. -->
<div style="background: url([[image]]),
url('test.png');">
</div>
<div style="background: url('test.png'),
url([[image]]);">
</div>
</body>
</html>
''',
'test.css': '''
.image {
background: url('test.png');
background-image: url([[ignoreMe]]);
background-image: image-set(url({{alsoMe}}), 1x);
background-image: image-set(
url({{ignore}}) 1x,
url('test.png') 2x);
}
''',
'test.png': 'PNG DATA'
}
expected_inlined = '''
<html>
<head>
<style>
.image {
background: url('data:image/png;base64,UE5HIERBVEE=');
background-image: url([[ignoreMe]]);
background-image: image-set(url({{alsoMe}}), 1x);
background-image: image-set(
url({{ignore}}) 1x,
url('data:image/png;base64,UE5HIERBVEE=') 2x);
}
</style>
</head>
<body>
<iron-icon src="[[icon]]"></iron-icon><!-- Should be ignored. -->
<iron-icon src="{{src}}"></iron-icon><!-- Also ignored. -->
<!-- [[image]] should be ignored. -->
<div style="background: url([[image]]),
url('data:image/png;base64,UE5HIERBVEE=');">
</div>
<div style="background: url('data:image/png;base64,UE5HIERBVEE='),
url([[image]]);">
</div>
</body>
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(util.normpath(filename)))
result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
tmp_dir.CleanUp()
def testInlineCSSWithIncludeDirective(self):
'''Tests that include directive in external css files also inlined'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="foo.css">
</head>
</html>
''',
'foo.css': '''<include src="style.css">''',
'style.css': '''
<include src="style2.css">
blink {
display: none;
}
''',
'style2.css': '''h1 {}''',
}
expected_inlined = '''
<html>
<head>
<style>
h1 {}
blink {
display: none;
}
</style>
</head>
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
tmp_dir.CleanUp()
def testCssIncludedFileNames(self):
'''Tests that all included files from css are returned'''
files = {
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
</body>
</html>
''',
'test.css': '''
<include src="test2.css">
''',
'test2.css': '''
<include src="test3.css">
.image {
background: url('test.png');
}
''',
'test3.css': '''h1 {}''',
'test.png': 'PNG DATA'
}
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'),
None)
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
tmp_dir.CleanUp()
def testInlineCSSLinks(self):
'''Tests that only CSS files referenced via relative URLs are inlined.'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="foo.css">
<link rel="stylesheet" href="chrome://resources/bar.css">
</head>
</html>
''',
'foo.css': '''
@import url(chrome://resources/blurp.css);
blink {
display: none;
}
''',
}
expected_inlined = '''
<html>
<head>
<style>
@import url(chrome://resources/blurp.css);
blink {
display: none;
}
</style>
<link rel="stylesheet" href="chrome://resources/bar.css">
</head>
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
tmp_dir.CleanUp()
def testFilenameVariableExpansion(self):
'''Tests that variables are expanded in filenames before inlining.'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="style[WHICH].css">
<script src="script[WHICH].js"></script>
</head>
<include src="tmpl[WHICH].html">
<img src="img[WHICH].png">
</html>
''',
'style1.css': '''h1 {}''',
'tmpl1.html': '''<h1></h1>''',
'script1.js': '''console.log('hello');''',
'img1.png': '''abc''',
}
expected_inlined = '''
<html>
<head>
<style>h1 {}</style>
<script>console.log('hello');</script>
</head>
<h1></h1>
<img src="data:image/png;base64,YWJj">
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
def replacer(var, repl):
return lambda filename: filename.replace('[%s]' % var, repl)
# Test normal inlining.
result = html_inline.DoInline(
tmp_dir.GetPath('index.html'),
None,
filename_expansion_function=replacer('WHICH', '1'))
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
# Test names-only inlining.
result = html_inline.DoInline(
tmp_dir.GetPath('index.html'),
None,
names_only=True,
filename_expansion_function=replacer('WHICH', '1'))
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
tmp_dir.CleanUp()
def testWithCloseTags(self):
'''Tests that close tags are removed.'''
files = {
'index.html': '''
<html>
<head>
<link rel="stylesheet" href="style1.css"></link>
<link rel="stylesheet" href="style2.css">
</link>
<link rel="stylesheet" href="style2.css"
>
</link>
<script src="script1.js"></script>
</head>
<include src="tmpl1.html"></include>
<include src="tmpl2.html">
</include>
<include src="tmpl2.html"
>
</include>
<img src="img1.png">
<include src='single-double-quotes.html"></include>
<include src="double-single-quotes.html'></include>
</html>
''',
'style1.css': '''h1 {}''',
'style2.css': '''h2 {}''',
'tmpl1.html': '''<h1></h1>''',
'tmpl2.html': '''<h2></h2>''',
'script1.js': '''console.log('hello');''',
'img1.png': '''abc''',
}
expected_inlined = '''
<html>
<head>
<style>h1 {}</style>
<style>h2 {}</style>
<style>h2 {}</style>
<script>console.log('hello');</script>
</head>
<h1></h1>
<h2></h2>
<h2></h2>
<img src="data:image/png;base64,YWJj">
<include src='single-double-quotes.html"></include>
<include src="double-single-quotes.html'></include>
</html>
'''
source_resources = set()
tmp_dir = util.TempDir(files)
for filename in files:
source_resources.add(tmp_dir.GetPath(filename))
# Test normal inlining.
result = html_inline.DoInline(
tmp_dir.GetPath('index.html'),
None)
resources = result.inlined_files
resources.add(tmp_dir.GetPath('index.html'))
self.failUnlessEqual(resources, source_resources)
self.failUnlessEqual(expected_inlined,
util.FixLineEnd(result.inlined_data, '\n'))
tmp_dir.CleanUp()
def testCommentedJsInclude(self):
  '''Tests that <include> works inside a comment.'''
  files = {
    'include.js': '// <include src="other.js">',
    'other.js': '// Copyright somebody\nalert(1);',
  }
  expected_inlined = '// Copyright somebody\nalert(1);'

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in files:
    source_resources.add(tmp_dir.GetPath(filename))

  result = html_inline.DoInline(tmp_dir.GetPath('include.js'), None)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('include.js'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)
  self.assertEqual(expected_inlined,
                   util.FixLineEnd(result.inlined_data, '\n'))
  tmp_dir.CleanUp()
def testCommentedJsIf(self):
  '''Tests that <if> works inside a comment.'''
  files = {
    'if.js': '''
// <if expr="True">
yep();
// </if>
// <if expr="False">
nope();
// </if>
''',
  }
  expected_inlined = '''
//
yep();
//
//
'''

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in files:
    source_resources.add(tmp_dir.GetPath(filename))

  class FakeGrdNode(object):
    def EvaluateCondition(self, cond):
      # eval() is acceptable here: |cond| comes from the fixture above.
      return eval(cond)

  result = html_inline.DoInline(tmp_dir.GetPath('if.js'), FakeGrdNode())
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('if.js'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)
  self.assertEqual(expected_inlined,
                   util.FixLineEnd(result.inlined_data, '\n'))
  tmp_dir.CleanUp()
def testImgSrcset(self):
  '''Tests that img srcset="" attributes are converted.'''
  # Note that there is no space before "img10.png" and that
  # "img11.png" has no descriptor.
  files = {
    'index.html': '''
<html>
<img src="img1.png" srcset="img2.png 1x, img3.png 2x">
<img src="img4.png" srcset=" img5.png 1x , img6.png 2x ">
<img src="chrome://theme/img11.png" srcset="img7.png 1x, '''\
'''chrome://theme/img13.png 2x">
<img srcset="img8.png 300w, img9.png 11E-2w,img10.png -1e2w">
<img srcset="img11.png">
<img srcset="img11.png, img2.png 1x">
<img srcset="img2.png 1x, img11.png">
</html>
''',
    'img1.png': '''a1''',
    'img2.png': '''a2''',
    'img3.png': '''a3''',
    'img4.png': '''a4''',
    'img5.png': '''a5''',
    'img6.png': '''a6''',
    'img7.png': '''a7''',
    'img8.png': '''a8''',
    'img9.png': '''a9''',
    'img10.png': '''a10''',
    'img11.png': '''a11''',
  }
  expected_inlined = '''
<html>
<img src="data:image/png;base64,YTE=" srcset="data:image/png;base64,'''\
'''YTI= 1x,data:image/png;base64,YTM= 2x">
<img src="data:image/png;base64,YTQ=" srcset="data:image/png;base64,'''\
'''YTU= 1x,data:image/png;base64,YTY= 2x">
<img src="chrome://theme/img11.png" srcset="data:image/png;base64,'''\
'''YTc= 1x,chrome://theme/img13.png 2x">
<img srcset="data:image/png;base64,YTg= 300w,data:image/png;base64,'''\
'''YTk= 11E-2w,data:image/png;base64,YTEw -1e2w">
<img srcset="data:image/png;base64,YTEx">
<img srcset="data:image/png;base64,YTEx,data:image/png;base64,YTI= 1x">
<img srcset="data:image/png;base64,YTI= 1x,data:image/png;base64,YTEx">
</html>
'''

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in files:
    source_resources.add(tmp_dir.GetPath(filename))

  # Test normal inlining.
  result = html_inline.DoInline(
      tmp_dir.GetPath('index.html'),
      None)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)
  self.assertEqual(expected_inlined,
                   util.FixLineEnd(result.inlined_data, '\n'))
  tmp_dir.CleanUp()
def testImgSrcsetIgnoresI18n(self):
  '''Tests that $i18n{...} strings are ignored when inlining.
  '''
  src_html = '''
<html>
<head></head>
<body>
<img srcset="$i18n{foo}">
</body>
</html>
'''
  files = {
    'index.html': src_html,
  }
  # Nothing should be inlined, so the output equals the input.
  expected_inlined = src_html

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in files:
    source_resources.add(tmp_dir.GetPath(util.normpath(filename)))

  result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)
  self.assertEqual(expected_inlined,
                   util.FixLineEnd(result.inlined_data, '\n'))
  tmp_dir.CleanUp()
def testSourceSrcset(self):
  '''Tests that source srcset="" attributes are converted.'''
  # Note that there is no space before "img10.png" and that
  # "img11.png" has no descriptor.
  files = {
    'index.html': '''
<html>
<source src="img1.png" srcset="img2.png 1x, img3.png 2x">
<source src="img4.png" srcset=" img5.png 1x , img6.png 2x ">
<source src="chrome://theme/img11.png" srcset="img7.png 1x, '''\
'''chrome://theme/img13.png 2x">
<source srcset="img8.png 300w, img9.png 11E-2w,img10.png -1e2w">
<source srcset="img11.png">
</html>
''',
    'img1.png': '''a1''',
    'img2.png': '''a2''',
    'img3.png': '''a3''',
    'img4.png': '''a4''',
    'img5.png': '''a5''',
    'img6.png': '''a6''',
    'img7.png': '''a7''',
    'img8.png': '''a8''',
    'img9.png': '''a9''',
    'img10.png': '''a10''',
    'img11.png': '''a11''',
  }
  expected_inlined = '''
<html>
<source src="data:image/png;base64,YTE=" srcset="data:image/png;'''\
'''base64,YTI= 1x,data:image/png;base64,YTM= 2x">
<source src="data:image/png;base64,YTQ=" srcset="data:image/png;'''\
'''base64,YTU= 1x,data:image/png;base64,YTY= 2x">
<source src="chrome://theme/img11.png" srcset="data:image/png;'''\
'''base64,YTc= 1x,chrome://theme/img13.png 2x">
<source srcset="data:image/png;base64,YTg= 300w,data:image/png;'''\
'''base64,YTk= 11E-2w,data:image/png;base64,YTEw -1e2w">
<source srcset="data:image/png;base64,YTEx">
</html>
'''

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in files:
    source_resources.add(tmp_dir.GetPath(filename))

  # Test normal inlining.
  result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)
  self.assertEqual(expected_inlined,
                   util.FixLineEnd(result.inlined_data, '\n'))
  tmp_dir.CleanUp()
def testConditionalInclude(self):
  '''Tests that output and dependency generation includes only files not'''\
  ''' blocked by <if> macros.'''
  files = {
    'index.html': '''
<html>
<if expr="True">
<img src="img1.png" srcset="img2.png 1x, img3.png 2x">
</if>
<if expr="False">
<img src="img4.png" srcset=" img5.png 1x, img6.png 2x ">
</if>
<if expr="True">
<img src="chrome://theme/img11.png" srcset="img7.png 1x, '''\
'''chrome://theme/img13.png 2x">
</if>
<img srcset="img8.png 300w, img9.png 11E-2w,img10.png -1e2w">
</html>
''',
    'img1.png': '''a1''',
    'img2.png': '''a2''',
    'img3.png': '''a3''',
    'img4.png': '''a4''',
    'img5.png': '''a5''',
    'img6.png': '''a6''',
    'img7.png': '''a7''',
    'img8.png': '''a8''',
    'img9.png': '''a9''',
    'img10.png': '''a10''',
  }
  expected_inlined = '''
<html>
<img src="data:image/png;base64,YTE=" srcset="data:image/png;base64,'''\
'''YTI= 1x,data:image/png;base64,YTM= 2x">
<img src="chrome://theme/img11.png" srcset="data:image/png;base64,'''\
'''YTc= 1x,chrome://theme/img13.png 2x">
<img srcset="data:image/png;base64,YTg= 300w,data:image/png;base64,'''\
'''YTk= 11E-2w,data:image/png;base64,YTEw -1e2w">
</html>
'''
  # img4-6 are behind a false <if> and must not appear as dependencies.
  expected_files = [
    'index.html',
    'img1.png',
    'img2.png',
    'img3.png',
    'img7.png',
    'img8.png',
    'img9.png',
    'img10.png'
  ]

  source_resources = set()
  tmp_dir = util.TempDir(files)
  for filename in expected_files:
    source_resources.add(tmp_dir.GetPath(filename))

  class FakeGrdNode(object):
    def EvaluateCondition(self, cond):
      # eval() is acceptable here: |cond| comes from the fixture above.
      return eval(cond)

  # Test normal inlining.
  result = html_inline.DoInline(
      tmp_dir.GetPath('index.html'),
      FakeGrdNode())
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)

  # ignore whitespace
  expected_inlined = re.sub(r'\s+', ' ', expected_inlined)
  actually_inlined = re.sub(r'\s+', ' ',
                            util.FixLineEnd(result.inlined_data, '\n'))
  self.assertEqual(expected_inlined, actually_inlined)
  tmp_dir.CleanUp()
def testPreprocessOnlyEvaluatesIncludeAndIf(self):
  '''Tests that preprocess_only=true evaluates <include> and <if> only. '''
  files = {
    'index.html': '''
<html>
<head>
<link rel="stylesheet" href="not_inlined.css">
<script src="also_not_inlined.js">
</head>
<body>
<include src="inline_this.html">
<if expr="True">
<p>'if' should be evaluated.</p>
</if>
</body>
</html>
''',
    'not_inlined.css': ''' /* <link> should not be inlined. */ ''',
    'also_not_inlined.js': ''' // <script> should not be inlined. ''',
    'inline_this.html': ''' <p>'include' should be inlined.</p> '''
  }
  expected_inlined = '''
<html>
<head>
<link rel="stylesheet" href="not_inlined.css">
<script src="also_not_inlined.js">
</head>
<body>
<p>'include' should be inlined.</p>
<p>'if' should be evaluated.</p>
</body>
</html>
'''

  source_resources = set()
  tmp_dir = util.TempDir(files)
  # Only the top-level file and the <include>d file count as dependencies.
  source_resources.add(tmp_dir.GetPath('index.html'))
  source_resources.add(tmp_dir.GetPath('inline_this.html'))

  result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None,
                                preprocess_only=True)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)

  # Ignore whitespace
  expected_inlined = re.sub(r'\s+', ' ', expected_inlined)
  actually_inlined = re.sub(r'\s+', ' ',
                            util.FixLineEnd(result.inlined_data, '\n'))
  self.assertEqual(expected_inlined, actually_inlined)
  tmp_dir.CleanUp()
def testPreprocessOnlyAppliesRecursively(self):
  '''Tests that preprocess_only=true propagates to included files. '''
  files = {
    'index.html': '''
<html>
<include src="outer_include.html">
</html>
''',
    'outer_include.html': '''
<include src="inner_include.html">
<link rel="stylesheet" href="not_inlined.css">
''',
    'inner_include.html': ''' <p>This should be inlined in index.html</p> ''',
    'not_inlined.css': ''' /* This should not be inlined. */ '''
  }
  expected_inlined = '''
<html>
<p>This should be inlined in index.html</p>
<link rel="stylesheet" href="not_inlined.css">
</html>
'''

  source_resources = set()
  tmp_dir = util.TempDir(files)
  # Both levels of <include> become dependencies; the CSS does not.
  source_resources.add(tmp_dir.GetPath('index.html'))
  source_resources.add(tmp_dir.GetPath('outer_include.html'))
  source_resources.add(tmp_dir.GetPath('inner_include.html'))

  result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None,
                                preprocess_only=True)
  resources = result.inlined_files
  resources.add(tmp_dir.GetPath('index.html'))
  # assertEqual replaces the deprecated failUnlessEqual alias
  # (removed in Python 3.12).
  self.assertEqual(resources, source_resources)

  # Ignore whitespace
  expected_inlined = re.sub(r'\s+', ' ', expected_inlined)
  actually_inlined = re.sub(r'\s+', ' ',
                            util.FixLineEnd(result.inlined_data, '\n'))
  self.assertEqual(expected_inlined, actually_inlined)
  tmp_dir.CleanUp()
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

45
third_party/libwebrtc/tools/grit/grit/format/minifier.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,45 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Framework for stripping whitespace and comments from resource files"""
from __future__ import print_function
from os import path
import subprocess
import sys
import six
# Minifier command lines (argv lists) configured via SetJsMinifier /
# SetCssMinifier; None means "leave that file type unmodified".
__js_minifier = None
__css_minifier = None
def SetJsMinifier(minifier):
  """Configures the JavaScript minifier.

  Args:
    minifier: the minifier command line as one whitespace-separated string.
  """
  global __js_minifier
  command = minifier.split()
  __js_minifier = command
def SetCssMinifier(minifier):
  """Configures the CSS minifier.

  Args:
    minifier: the minifier command line as one whitespace-separated string.
  """
  global __css_minifier
  command = minifier.split()
  __css_minifier = command
def Minify(source, filename):
  """Minify |source| (bytes) from |filename| and return bytes.

  Dispatches on the file extension: '.js' and '.css' are piped through the
  configured external minifier; everything else (or an unconfigured
  minifier) is returned unchanged.  Exits the process if the minifier
  fails.
  """
  extension = path.splitext(filename)[1]
  if extension == '.js':
    minify_command = __js_minifier
  elif extension == '.css':
    minify_command = __css_minifier
  else:
    minify_command = None
  if not minify_command:
    return source
  process = subprocess.Popen(minify_command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  stdout, stderr = process.communicate(source)
  if process.returncode != 0:
    print('Minification failed for %s' % filename)
    print(stderr)
    sys.exit(process.returncode)
  return stdout

Просмотреть файл

@ -0,0 +1,26 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Translates policy_templates.json files.
"""
from __future__ import print_function
from grit.node import structure
def Format(root, lang='en', output_dir='.'):
  """Returns the policy_templates.json contents translated to |lang|.

  Scans the active resource tree for the single structure node of type
  'policy_template_metafile' and returns its gatherer's translation.

  Args:
    root: root node of the parsed .grd tree.
    lang: output language code, e.g. 'de'.
    output_dir: unused; present for formatter-interface compatibility.
  """
  policy_json = None
  for item in root.ActiveDescendants():
    # NOTE(review): nodes are used as context managers here; presumably this
    # establishes |item| as the current evaluation context — confirm in
    # grit.node.base.
    with item:
      if (isinstance(item, structure.StructureNode) and
          item.attrs['type'] == 'policy_template_metafile'):
        json_text = item.gatherer.Translate(
            lang,
            pseudo_if_not_available=item.PseudoIsAllowed(),
            fallback_to_english=item.ShouldFallbackToEnglish())
        # We're only expecting one node of this kind.
        assert not policy_json
        policy_json = json_text
  return policy_json

Просмотреть файл

@ -0,0 +1,207 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for policy_templates_json.py.
"""
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import grit.extern.tclib
import tempfile
import unittest
from six import StringIO
from grit import grd_reader
from grit.tool import build
class PolicyTemplatesJsonUnittest(unittest.TestCase):
  """End-to-end test of en->de policy_templates.json translation via xtb."""

  def testPolicyTranslation(self):
    # Create test policy_templates.json data.
    caption = "The main policy"
    caption_translation = "Die Hauptrichtlinie"

    message = \
        "Red cabbage stays red cabbage and wedding dress stays wedding dress"
    message_translation = \
        "Blaukraut bleibt Blaukraut und Brautkleid bleibt Brautkleid"

    schema_key_description = "Number of users"
    schema_key_description_translation = "Anzahl der Nutzer"

    policy_json = """
{
"policy_definitions": [
{
'name': 'MainPolicy',
'type': 'main',
'owners': ['foo@bar.com'],
'schema': {
'properties': {
'default_launch_container': {
'enum': [
'tab',
'window',
],
'type': 'string',
},
'users_number': {
'description': '''%s''',
'type': 'integer',
},
},
'type': 'object',
},
'supported_on': ['chrome_os:29-'],
'features': {
'can_be_recommended': True,
'dynamic_refresh': True,
},
'example_value': True,
'caption': '''%s''',
'tags': [],
'desc': '''This policy does stuff.'''
},
],
"policy_atomic_group_definitions": [],
"placeholders": [],
"messages": {
'message_string_id': {
'desc': '''The description is removed from the grit output''',
'text': '''%s'''
}
}
}""" % (schema_key_description, caption, message)

    # Create translations. The translation IDs are hashed from the English
    # text.  (Trailing semicolons removed — they are redundant in Python.)
    caption_id = grit.extern.tclib.GenerateMessageId(caption)
    message_id = grit.extern.tclib.GenerateMessageId(message)
    schema_key_description_id = grit.extern.tclib.GenerateMessageId(
        schema_key_description)
    policy_xtb = """
<?xml version="1.0" ?>
<!DOCTYPE translationbundle>
<translationbundle lang="de">
<translation id="%s">%s</translation>
<translation id="%s">%s</translation>
<translation id="%s">%s</translation>
</translationbundle>""" % (caption_id, caption_translation,
                           message_id, message_translation,
                           schema_key_description_id,
                           schema_key_description_translation)

    # Write both to a temp file.
    tmp_dir_name = tempfile.gettempdir()

    json_file_path = os.path.join(tmp_dir_name, 'test.json')
    with open(json_file_path, 'w') as f:
      f.write(policy_json.strip())

    xtb_file_path = os.path.join(tmp_dir_name, 'test.xtb')
    with open(xtb_file_path, 'w') as f:
      f.write(policy_xtb.strip())

    # Assemble a test grit tree, similar to policy_templates.grd.
    grd_text = '''
<grit base_dir="." latest_public_release="0" current_release="1" source_lang_id="en">
<translations>
<file path="%s" lang="de" />
</translations>
<release seq="1">
<structures>
<structure name="IDD_POLICY_SOURCE_FILE" file="%s" type="policy_template_metafile" />
</structures>
</release>
</grit>''' % (xtb_file_path, json_file_path)
    grd_string_io = StringIO(grd_text)

    # Parse the grit tree and load the policies' JSON with a gatherer.
    grd = grd_reader.Parse(grd_string_io, dir=tmp_dir_name,
                           defines={'_google_chrome': True})
    grd.SetOutputLanguage('en')
    grd.RunGatherers()

    # Remove the temp files.
    os.unlink(xtb_file_path)
    os.unlink(json_file_path)

    # Run grit with en->de translation.
    env_lang = 'en'
    out_lang = 'de'
    env_defs = {'_google_chrome': '1'}

    grd.SetOutputLanguage(env_lang)
    grd.SetDefines(env_defs)
    buf = StringIO()
    build.RcBuilder.ProcessNode(grd, DummyOutput('policy_templates', out_lang),
                                buf)
    output = buf.getvalue()

    # Caption and message texts get taken from xtb.
    # desc is 'translated' to some pseudo-English
    # 'ThïPïs pôPôlïPïcýPý dôéPôés stüPüff'.
    expected = u"""{
"policy_definitions": [
{
"caption": "%s",
"desc": "Th\xefP\xefs p\xf4P\xf4l\xefP\xefc\xfdP\xfd d\xf4\xe9P\xf4\xe9s st\xfcP\xfcff.",
"example_value": true,
"features": {"can_be_recommended": true, "dynamic_refresh": true},
"name": "MainPolicy",
"owners": ["foo@bar.com"],
"schema": {
"properties": {
"default_launch_container": {
"enum": [
"tab",
"window"
],
"type": "string"
},
"users_number": {
"description": "%s",
"type": "integer"
}
},
"type": "object"
},
"supported_on": ["chrome_os:29-"],
"tags": [],
"type": "main"
}
],
"policy_atomic_group_definitions": [
],
"messages": {
"message_string_id": {
"text": "%s"
}
}
}""" % (caption_translation, schema_key_description_translation,
        message_translation)
    self.assertEqual(expected, output)
class DummyOutput(object):
  """Minimal stand-in for an <output> node, exposing the accessors that
  RcBuilder.ProcessNode needs."""

  def __init__(self, type, language):
    # 'type' deliberately mirrors the real node API, despite shadowing the
    # builtin.
    self.type, self.language = type, language

  def GetType(self):
    return self.type

  def GetLanguage(self):
    return self.language

  def GetOutputFilename(self):
    # A fixed dummy filename; the tests never write to it.
    return 'hello.gif'

474
third_party/libwebrtc/tools/grit/grit/format/rc.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,474 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Support for formatting an RC file for compilation.
'''
from __future__ import print_function
import os
import re
from functools import partial
import six
from grit import util
from grit.node import misc
def Format(root, lang='en', output_dir='.'):
  """Generator yielding the .rc file contents for |root| in |lang|.

  Yields the preamble first, then one chunk per active node: a STRINGTABLE
  per <messages> container, and include/structure entries for the rest.
  """
  from grit.node import empty, include, message, structure

  yield _FormatHeader(root, lang, output_dir)

  for item in root.ActiveDescendants():
    if isinstance(item, empty.MessagesNode):
      # Write one STRINGTABLE per <messages> container.
      # This is hacky: it iterates over the children twice.
      yield 'STRINGTABLE\nBEGIN\n'
      for subitem in item.ActiveDescendants():
        if isinstance(subitem, message.MessageNode):
          with subitem:
            yield FormatMessage(subitem, lang)
      yield 'END\n\n'
    elif isinstance(item, include.IncludeNode):
      with item:
        yield FormatInclude(item, lang, output_dir)
    elif isinstance(item, structure.StructureNode):
      with item:
        yield FormatStructure(item, lang, output_dir)
'''
This dictionary defines the language charset pair lookup table, which is used
for replacing the GRIT expand variables for language info in Product Version
resource. The key is the language ISO country code, and the value
is the language and character-set pair, which is a hexadecimal string
consisting of the concatenation of the language and character-set identifiers.
The first 4 digits of the value are the hex value of the LCID; the remaining
4 digits are the hex value of the character-set id (code page) of the language.
LCID resource: http://msdn.microsoft.com/en-us/library/ms776294.aspx
Codepage resource: http://www.science.co.il/language/locale-codes.asp
We have defined three GRIT expand_variables to be used in the version resource
file to set the language info. Here is an example how they should be used in
the VS_VERSION_INFO section of the resource file to allow GRIT to localize
the language info correctly according to product locale.
VS_VERSION_INFO VERSIONINFO
...
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "[GRITVERLANGCHARSETHEX]"
BEGIN
...
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", [GRITVERLANGID], [GRITVERCHARSETID]
END
END
'''
# Maps a unified language code to its 8-hex-digit LCID + code-page pair
# (see the explanatory comment block above for the encoding).
_LANGUAGE_CHARSET_PAIR = {
  # Language neutral LCID, unicode(1200) code page.
  'neutral' : '000004b0',
  # LANG_USER_DEFAULT LCID, unicode(1200) code page.
  'userdefault' : '040004b0',
  'ar' : '040104e8',
  'fi' : '040b04e4',
  'ko' : '041203b5',
  'es' : '0c0a04e4',
  'bg' : '040204e3',
  # No codepage for filipino, use unicode(1200).
  'fil' : '046404e4',
  'fr' : '040c04e4',
  'lv' : '042604e9',
  'sv' : '041d04e4',
  'ca' : '040304e4',
  'de' : '040704e4',
  'lt' : '042704e9',
  # Do not use! This is only around for backwards
  # compatibility and will be removed - use fil instead
  'tl' : '0c0004b0',
  'zh-CN' : '080403a8',
  'zh-TW' : '040403b6',
  'zh-HK' : '0c0403b6',
  'el' : '040804e5',
  'no' : '001404e4',
  'nb' : '041404e4',
  'nn' : '081404e4',
  'th' : '041e036a',
  'he' : '040d04e7',
  'iw' : '040d04e7',
  'pl' : '041504e2',
  'tr' : '041f04e6',
  'hr' : '041a04e4',
  # No codepage for Hindi, use unicode(1200).
  'hi' : '043904b0',
  'pt-PT' : '081604e4',
  'pt-BR' : '041604e4',
  'uk' : '042204e3',
  'cs' : '040504e2',
  'hu' : '040e04e2',
  'ro' : '041804e2',
  # No codepage for Urdu, use unicode(1200).
  'ur' : '042004b0',
  'da' : '040604e4',
  'is' : '040f04e4',
  'ru' : '041904e3',
  'vi' : '042a04ea',
  'nl' : '041304e4',
  'id' : '042104e4',
  'sr' : '081a04e2',
  'en-GB' : '0809040e',
  'it' : '041004e4',
  'sk' : '041b04e2',
  'et' : '042504e9',
  'ja' : '041103a4',
  'sl' : '042404e2',
  'en' : '040904b0',
  # LCID for Mexico; Windows does not support L.A. LCID.
  'es-419' : '080a04e4',
  # No codepage for Bengali, use unicode(1200).
  'bn' : '044504b0',
  'fa' : '042904e8',
  # No codepage for Gujarati, use unicode(1200).
  'gu' : '044704b0',
  # No codepage for Kannada, use unicode(1200).
  'kn' : '044b04b0',
  # Malay (Malaysia) [ms-MY]
  'ms' : '043e04e4',
  # No codepage for Malayalam, use unicode(1200).
  'ml' : '044c04b0',
  # No codepage for Marathi, use unicode(1200).
  'mr' : '044e04b0',
  # No codepage for Oriya , use unicode(1200).
  'or' : '044804b0',
  # No codepage for Tamil, use unicode(1200).
  'ta' : '044904b0',
  # No codepage for Telugu, use unicode(1200).
  'te' : '044a04b0',
  # No codepage for Amharic, use unicode(1200). >= Vista.
  'am' : '045e04b0',
  'sw' : '044104e4',
  'af' : '043604e4',
  'eu' : '042d04e4',
  'fr-CA' : '0c0c04e4',
  'gl' : '045604e4',
  # No codepage for Zulu, use unicode(1200).
  'zu' : '043504b0',
  'fake-bidi' : '040d04e7',
}
# Language ID resource: http://msdn.microsoft.com/en-us/library/ms776294.aspx
#
# There is no appropriate sublang for Spanish (Latin America) [es-419], so we
# use Mexico. SUBLANG_DEFAULT would incorrectly map to Spain. Unlike other
# Latin American countries, Mexican Spanish is supported by VERSIONINFO:
# http://msdn.microsoft.com/en-us/library/aa381058.aspx
# Maps a unified language code to the 'LANG_X, SUBLANG_Y' pair used in the
# RC LANGUAGE directive (see the comment above about es-419).
_LANGUAGE_DIRECTIVE_PAIR = {
  'neutral' : 'LANG_NEUTRAL, SUBLANG_NEUTRAL',
  'userdefault' : 'LANG_NEUTRAL, SUBLANG_DEFAULT',
  'ar' : 'LANG_ARABIC, SUBLANG_DEFAULT',
  'fi' : 'LANG_FINNISH, SUBLANG_DEFAULT',
  'ko' : 'LANG_KOREAN, SUBLANG_KOREAN',
  'es' : 'LANG_SPANISH, SUBLANG_SPANISH_MODERN',
  'bg' : 'LANG_BULGARIAN, SUBLANG_DEFAULT',
  # LANG_FILIPINO (100) not in VC 7 winnt.h.
  'fil' : '100, SUBLANG_DEFAULT',
  'fr' : 'LANG_FRENCH, SUBLANG_FRENCH',
  'lv' : 'LANG_LATVIAN, SUBLANG_DEFAULT',
  'sv' : 'LANG_SWEDISH, SUBLANG_SWEDISH',
  'ca' : 'LANG_CATALAN, SUBLANG_DEFAULT',
  'de' : 'LANG_GERMAN, SUBLANG_GERMAN',
  'lt' : 'LANG_LITHUANIAN, SUBLANG_LITHUANIAN',
  # Do not use! See above.
  'tl' : 'LANG_NEUTRAL, SUBLANG_DEFAULT',
  'zh-CN' : 'LANG_CHINESE, SUBLANG_CHINESE_SIMPLIFIED',
  'zh-TW' : 'LANG_CHINESE, SUBLANG_CHINESE_TRADITIONAL',
  'zh-HK' : 'LANG_CHINESE, SUBLANG_CHINESE_HONGKONG',
  'el' : 'LANG_GREEK, SUBLANG_DEFAULT',
  'no' : 'LANG_NORWEGIAN, SUBLANG_DEFAULT',
  'nb' : 'LANG_NORWEGIAN, SUBLANG_NORWEGIAN_BOKMAL',
  'nn' : 'LANG_NORWEGIAN, SUBLANG_NORWEGIAN_NYNORSK',
  'th' : 'LANG_THAI, SUBLANG_DEFAULT',
  'he' : 'LANG_HEBREW, SUBLANG_DEFAULT',
  'iw' : 'LANG_HEBREW, SUBLANG_DEFAULT',
  'pl' : 'LANG_POLISH, SUBLANG_DEFAULT',
  'tr' : 'LANG_TURKISH, SUBLANG_DEFAULT',
  'hr' : 'LANG_CROATIAN, SUBLANG_DEFAULT',
  'hi' : 'LANG_HINDI, SUBLANG_DEFAULT',
  'pt-PT' : 'LANG_PORTUGUESE, SUBLANG_PORTUGUESE',
  'pt-BR' : 'LANG_PORTUGUESE, SUBLANG_DEFAULT',
  'uk' : 'LANG_UKRAINIAN, SUBLANG_DEFAULT',
  'cs' : 'LANG_CZECH, SUBLANG_DEFAULT',
  'hu' : 'LANG_HUNGARIAN, SUBLANG_DEFAULT',
  'ro' : 'LANG_ROMANIAN, SUBLANG_DEFAULT',
  'ur' : 'LANG_URDU, SUBLANG_DEFAULT',
  'da' : 'LANG_DANISH, SUBLANG_DEFAULT',
  'is' : 'LANG_ICELANDIC, SUBLANG_DEFAULT',
  'ru' : 'LANG_RUSSIAN, SUBLANG_DEFAULT',
  'vi' : 'LANG_VIETNAMESE, SUBLANG_DEFAULT',
  'nl' : 'LANG_DUTCH, SUBLANG_DEFAULT',
  'id' : 'LANG_INDONESIAN, SUBLANG_DEFAULT',
  'sr' : 'LANG_SERBIAN, SUBLANG_SERBIAN_LATIN',
  'en-GB' : 'LANG_ENGLISH, SUBLANG_ENGLISH_UK',
  'it' : 'LANG_ITALIAN, SUBLANG_DEFAULT',
  'sk' : 'LANG_SLOVAK, SUBLANG_DEFAULT',
  'et' : 'LANG_ESTONIAN, SUBLANG_DEFAULT',
  'ja' : 'LANG_JAPANESE, SUBLANG_DEFAULT',
  'sl' : 'LANG_SLOVENIAN, SUBLANG_DEFAULT',
  'en' : 'LANG_ENGLISH, SUBLANG_ENGLISH_US',
  # No L.A. sublang exists.
  'es-419' : 'LANG_SPANISH, SUBLANG_SPANISH_MEXICAN',
  'bn' : 'LANG_BENGALI, SUBLANG_DEFAULT',
  'fa' : 'LANG_PERSIAN, SUBLANG_DEFAULT',
  'gu' : 'LANG_GUJARATI, SUBLANG_DEFAULT',
  'kn' : 'LANG_KANNADA, SUBLANG_DEFAULT',
  'ms' : 'LANG_MALAY, SUBLANG_DEFAULT',
  'ml' : 'LANG_MALAYALAM, SUBLANG_DEFAULT',
  'mr' : 'LANG_MARATHI, SUBLANG_DEFAULT',
  'or' : 'LANG_ORIYA, SUBLANG_DEFAULT',
  'ta' : 'LANG_TAMIL, SUBLANG_DEFAULT',
  'te' : 'LANG_TELUGU, SUBLANG_DEFAULT',
  'am' : 'LANG_AMHARIC, SUBLANG_DEFAULT',
  'sw' : 'LANG_SWAHILI, SUBLANG_DEFAULT',
  'af' : 'LANG_AFRIKAANS, SUBLANG_DEFAULT',
  'eu' : 'LANG_BASQUE, SUBLANG_DEFAULT',
  'fr-CA' : 'LANG_FRENCH, SUBLANG_FRENCH_CANADIAN',
  'gl' : 'LANG_GALICIAN, SUBLANG_DEFAULT',
  'zu' : 'LANG_ZULU, SUBLANG_DEFAULT',
  'pa' : 'LANG_PUNJABI, SUBLANG_PUNJABI_INDIA',
  'sa' : 'LANG_SANSKRIT, SUBLANG_SANSKRIT_INDIA',
  'si' : 'LANG_SINHALESE, SUBLANG_SINHALESE_SRI_LANKA',
  'ne' : 'LANG_NEPALI, SUBLANG_NEPALI_NEPAL',
  'ti' : 'LANG_TIGRIGNA, SUBLANG_TIGRIGNA_ERITREA',
  'fake-bidi' : 'LANG_HEBREW, SUBLANG_DEFAULT',
}
# A note on 'no-specific-language' in the following few functions:
# Some build systems may wish to call GRIT to scan for dependencies in
# a language-agnostic way, and can then specify this fake language as
# the output context. It should never be used when output is actually
# being generated.
def GetLangCharsetPair(language):
  """Returns the 8-hex-digit LCID+codepage pair for |language|, or ''.

  Prints a warning for unknown languages, except for the fake
  'no-specific-language' used by language-agnostic dependency scans.
  """
  pair = _LANGUAGE_CHARSET_PAIR.get(language)
  if pair is not None:
    return pair
  if language != 'no-specific-language':
    print('Warning:GetLangCharsetPair() found undefined language %s' % language)
  return ''
def GetLangDirectivePair(language):
  """Returns the 'LANG_X, SUBLANG_Y' directive pair for |language|."""
  directive = _LANGUAGE_DIRECTIVE_PAIR.get(language)
  if directive is not None:
    return directive
  # We don't check for 'no-specific-language' here because this
  # function should only get called when output is being formatted,
  # and at that point we would not want to get
  # 'no-specific-language' passed as the language.
  print('Warning:GetLangDirectivePair() found undefined language %s' % language)
  return 'unknown language: see tools/grit/format/rc.py'
def GetLangIdHex(language):
  """Returns the hex LCID (e.g. '0x0409') for |language|, or '' if unknown."""
  try:
    pair = _LANGUAGE_CHARSET_PAIR[language]
  except KeyError:
    if language != 'no-specific-language':
      print('Warning:GetLangIdHex() found undefined language %s' % language)
    return ''
  # The LCID is the first four hex digits of the pair.
  return '0x' + pair[:4]
def GetCharsetIdDecimal(language):
  """Returns the code page for |language| as a decimal string, or ''."""
  try:
    pair = _LANGUAGE_CHARSET_PAIR[language]
  except KeyError:
    if language != 'no-specific-language':
      print('Warning:GetCharsetIdDecimal() found undefined language %s'
            % language)
    return ''
  # The code page is the trailing four hex digits of the pair.
  return str(int(pair[4:], 16))
def GetUnifiedLangCode(language):
  """Converts 'xx_yy' locale names to the unified 'xx-YY' form.

  Anything not matching the lowercase 'xx_yy' pattern is returned unchanged.
  """
  if re.match('([a-z]{1,2})_([a-z]{1,2})', language):
    lang, _, region = language.partition('_')
    return lang + '-' + region.upper()
  return language
def RcSubstitutions(substituter, lang):
  '''Add language-based substitutions for Rc files to the substitutor.'''
  code = GetUnifiedLangCode(lang)
  substituter.AddSubstitutions({
      'GRITVERLANGCHARSETHEX': GetLangCharsetPair(code),
      'GRITVERLANGID': GetLangIdHex(code),
      'GRITVERCHARSETID': GetCharsetIdDecimal(code),
  })
def _FormatHeader(root, lang, output_dir):
  '''Returns the required preamble for RC files.'''
  assert isinstance(lang, six.string_types)
  assert isinstance(root, misc.GritNode)
  # Find the location of the resource header file, so that we can include
  # it.
  resource_header = 'resource.h'  # fall back to this
  language_directive = ''
  for output in root.GetOutputFiles():
    if output.attrs['type'] == 'rc_header':
      resource_header = os.path.abspath(output.GetOutputFilename())
      resource_header = util.MakeRelativePath(output_dir, resource_header)
    if output.attrs['lang'] != lang:
      continue
    if output.attrs['language_section'] == '':
      # If no language_section is requested, no directive is added
      # (Used when the generated rc will be included from another rc
      # file that will have the appropriate language directive)
      language_directive = ''
    elif output.attrs['language_section'] == 'neutral':
      # If a neutral language section is requested (default), add a
      # neutral language directive
      language_directive = 'LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL'
    elif output.attrs['language_section'] == 'lang':
      language_directive = 'LANGUAGE %s' % GetLangDirectivePair(lang)
  # Double the backslashes so the path survives RC-format escaping.
  resource_header = resource_header.replace('\\', '\\\\')
  return '''// This file is automatically generated by GRIT. Do not edit.
#include "%s"
#include <winresrc.h>
#ifdef IDC_STATIC
#undef IDC_STATIC
#endif
#define IDC_STATIC (-1)
%s
''' % (resource_header, language_directive)
# end _FormatHeader() function
def FormatMessage(item, lang):
  '''Returns a single message of a string table.'''
  message = item.ws_at_start + item.Translate(lang) + item.ws_at_end
  # Escape quotation marks (RC format uses doubling-up).
  message = message.replace('"', '""')
  # Replace linebreaks with a \n escape.
  message = util.LINEBREAKS.sub(r'\\n', message)
  if hasattr(item.GetRoot(), 'GetSubstituter'):
    substituter = item.GetRoot().GetSubstituter()
    message = substituter.Substitute(message)
  # The first textual ID becomes the STRINGTABLE entry name.
  name_attr = item.GetTextualIds()[0]
  return ' %-15s "%s"\n' % (name_attr, message)
def _FormatSection(item, lang, output_dir):
  '''Writes out an .rc file section (dialog, menu, version, etc.) for |item|,
  translated to |lang|. Returns '' for nodes excluded from the .rc output.'''
  assert isinstance(lang, six.string_types)
  from grit.node import structure
  assert isinstance(item, structure.StructureNode)
  if item.IsExcludedFromRc():
    return ''
  text = item.gatherer.Translate(
      lang, skeleton_gatherer=item.GetSkeletonGatherer(),
      pseudo_if_not_available=item.PseudoIsAllowed(),
      fallback_to_english=item.ShouldFallbackToEnglish()) + '\n\n'
  # Replace the language expand_variables in version rc info.
  if item.ExpandVariables() and hasattr(item.GetRoot(), 'GetSubstituter'):
    substituter = item.GetRoot().GetSubstituter()
    text = substituter.Substitute(text)
  return text
def FormatInclude(item, lang, output_dir, type=None, process_html=False):
  '''Formats an item that is included in an .rc file (e.g. an ICON).

  Args:
    item: an IncludeNode or StructureNode
    lang, output_dir: standard formatter parameters
    type: .rc file resource type, e.g. 'ICON' (ignored unless item is a
          StructureNode)
    process_html: False/True (ignored unless item is a StructureNode)
  '''
  assert isinstance(lang, six.string_types)
  from grit.node import structure
  from grit.node import include
  assert isinstance(item, (structure.StructureNode, include.IncludeNode))

  if isinstance(item, include.IncludeNode):
    # <include> nodes carry their own resource type and path attributes,
    # overriding the |type|/|process_html| arguments.
    type = item.attrs['type'].upper()
    process_html = item.attrs['flattenhtml'] == 'true'
    filename_only = item.attrs['filenameonly'] == 'true'
    relative_path = item.attrs['relativepath'] == 'true'
  else:
    assert (isinstance(item, structure.StructureNode) and item.attrs['type'] in
            ['admin_template', 'chrome_html', 'chrome_scaled_image',
             'tr_html', 'txt'])
    filename_only = False
    relative_path = False

  # By default, we use relative pathnames to included resources so that
  # sharing the resulting .rc files is possible.
  #
  # The FileForLanguage() Function has the side effect of generating the file
  # if needed (e.g. if it is an HTML file include).
  file_for_lang = item.FileForLanguage(lang, output_dir)
  if file_for_lang is None:
    return ''

  filename = os.path.abspath(file_for_lang)
  if process_html:
    filename = item.Process(output_dir)
  elif filename_only:
    filename = os.path.basename(filename)
  elif relative_path:
    filename = util.MakeRelativePath(output_dir, filename)

  filename = filename.replace('\\', '\\\\')  # escape for the RC format

  if isinstance(item, structure.StructureNode) and item.IsExcludedFromRc():
    return ''

  name = item.attrs['name']
  item_id = item.GetRoot().GetIdMap()[name]
  return '// ID: %d\n%-18s %-18s "%s"\n' % (item_id, name, type, filename)
def _DoNotFormat(item, lang, output_dir):
return ''
# Formatter instance to use for each type attribute
# when formatting Structure nodes.
# Section-like types are copied out of the source .rc verbatim; file-like
# types are emitted as single-line includes via FormatInclude (with the .rc
# resource type pre-bound); policy templates produce no .rc output at all.
_STRUCTURE_FORMATTERS = {
  'accelerators' : _FormatSection,
  'dialog' : _FormatSection,
  'menu' : _FormatSection,
  'rcdata' : _FormatSection,
  'version' : _FormatSection,
  'admin_template' : partial(FormatInclude, type='ADM'),
  'chrome_html' : partial(FormatInclude, type='BINDATA',
                          process_html=True),
  'chrome_scaled_image' : partial(FormatInclude, type='BINDATA'),
  'tr_html' : partial(FormatInclude, type='HTML'),
  'txt' : partial(FormatInclude, type='TXT'),
  'policy_template_metafile': _DoNotFormat,
}
def FormatStructure(item, lang, output_dir):
  '''Formats a StructureNode by dispatching on its 'type' attribute.

  Raises KeyError for a type with no registered formatter.
  '''
  return _STRUCTURE_FORMATTERS[item.attrs['type']](item, lang, output_dir)

48
third_party/libwebrtc/tools/grit/grit/format/rc_header.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,48 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Item formatters for RC headers.
'''
from __future__ import print_function
def Format(root, lang='en', output_dir='.'):
  '''Generator yielding the text of the resource-id (.h) header file.

  Args:
    root: the GritNode root.
    lang, output_dir: standard formatter parameters (not used by this
        formatter, but part of the common formatter signature).
  '''
  yield '''\
// This file is automatically generated by GRIT. Do not edit.
#pragma once
'''
  # Check for emit nodes under the rc_header. If any emit node
  # is present, we assume it means the GRD file wants to override
  # the default header, with no includes.
  default_includes = ['#include <atlres.h>', '']
  emit_lines = []
  for output_node in root.GetOutputFiles():
    if output_node.GetType() == 'rc_header':
      for child in output_node.children:
        if child.name == 'emit' and child.attrs['emit_type'] == 'prepend':
          emit_lines.append(child.GetCdata())
  # Empty emit_lines falls back to the default include list.
  for line in emit_lines or default_includes:
    yield line + '\n'
  if root.IsWhitelistSupportEnabled():
    yield '#include "ui/base/resource/whitelist.h"\n'
  for line in FormatDefines(root):
    yield line
def FormatDefines(root):
  '''Generator yielding one '#define SYMBOL 1234' line per textual id.

  Args:
    root: A GritNode.
  '''
  tids = root.GetIdMap()
  # With whitelist support enabled, each define additionally records the
  # resource's use at runtime via ::ui::WhitelistedResource.
  fmt = ('#define {0} (::ui::WhitelistedResource<{1}>(), {1})\n'
         if root.IsWhitelistSupportEnabled()
         else '#define {0} {1}\n')
  for node in root.ActiveDescendants():
    with node:
      for tid in node.GetTextualIds():
        yield fmt.format(tid, tids[tid])

Просмотреть файл

@ -0,0 +1,138 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the rc_header formatter'''
# GRD samples exceed the 80 character limit.
# pylint: disable-msg=C6310
from __future__ import print_function
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from grit import util
from grit.format import rc_header
class RcHeaderFormatterUnittest(unittest.TestCase):
  '''Tests for the rc_header formatters.'''

  def FormatAll(self, grd):
    '''Formats all defines for |grd|, with spaces stripped so assertions can
    match 'SYMBOL<id>' pairs regardless of define alignment.'''
    output = rc_header.FormatDefines(grd)
    return ''.join(output).replace(' ', '')

  def testFormatter(self):
    grd = util.ParseGrdForUnittest('''
        <includes first_id="300" comment="bingo">
          <include type="gif" name="ID_LOGO" file="images/logo.gif" />
        </includes>
        <messages first_id="10000">
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="IDS_BONGO">
            Bongo!
          </message>
        </messages>
        <structures>
          <structure type="dialog" name="IDD_NARROW_DIALOG" file="rc_files/dialogs.rc" />
          <structure type="version" name="VS_VERSION_INFO" file="rc_files/version.rc" />
        </structures>''')
    output = self.FormatAll(grd)
    # With spaces stripped a define renders as '<name><id>'.
    self.assertIn('IDS_GREETING10000', output)
    self.assertIn('ID_LOGO300', output)

  def testOnlyDefineResourcesThatSatisfyOutputCondition(self):
    grd = util.ParseGrdForUnittest('''
        <includes first_id="300" comment="bingo">
          <include type="gif" name="ID_LOGO" file="images/logo.gif" />
        </includes>
        <messages first_id="10000">
          <message name="IDS_FIRSTPRESENTSTRING" desc="Present in .rc file.">
            I will appear in the .rc file.
          </message>
          <if expr="False"> <!--Do not include in the .rc files until used.-->
            <message name="IDS_MISSINGSTRING" desc="Not present in .rc file.">
              I will not appear in the .rc file.
            </message>
          </if>
          <if expr="lang != 'es'">
            <message name="IDS_LANGUAGESPECIFICSTRING" desc="Present in .rc file.">
              Hello.
            </message>
          </if>
          <if expr="lang == 'es'">
            <message name="IDS_LANGUAGESPECIFICSTRING" desc="Present in .rc file.">
              Hola.
            </message>
          </if>
          <message name="IDS_THIRDPRESENTSTRING" desc="Present in .rc file.">
            I will also appear in the .rc file.
          </message>
        </messages>''')
    output = self.FormatAll(grd)
    # Note that the id skipped by the false <if> is still consumed, so the
    # following ids keep their position in the sequence.
    self.assertIn('IDS_FIRSTPRESENTSTRING10000', output)
    self.assertNotIn('IDS_MISSINGSTRING', output)
    self.assertIn('IDS_LANGUAGESPECIFICSTRING10002', output)
    self.assertIn('IDS_THIRDPRESENTSTRING10003', output)

  def testEmit(self):
    # Only <emit emit_type="prepend"> children of active rc_header outputs
    # may override the default header includes.
    grd = util.ParseGrdForUnittest('''
        <outputs>
          <output type="rc_all" filename="dummy">
            <emit emit_type="prepend">Wrong</emit>
          </output>
          <if expr="False">
            <output type="rc_header" filename="dummy">
              <emit emit_type="prepend">No</emit>
            </output>
          </if>
          <output type="rc_header" filename="dummy">
            <emit emit_type="append">Error</emit>
          </output>
          <output type="rc_header" filename="dummy">
            <emit emit_type="prepend">Bingo</emit>
          </output>
        </outputs>''')
    output = ''.join(rc_header.Format(grd, 'en', '.'))
    output = util.StripBlankLinesAndComments(output)
    self.assertEqual('#pragma once\nBingo', output)

  def testRcHeaderFormat(self):
    grd = util.ParseGrdForUnittest('''
        <includes first_id="300" comment="bingo">
          <include type="gif" name="IDR_LOGO" file="images/logo.gif" />
        </includes>
        <messages first_id="10000">
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="IDS_BONGO">
            Bongo!
          </message>
        </messages>''')

    # Using the default settings.
    output = rc_header.FormatDefines(grd)
    self.assertEqual(('#define IDR_LOGO 300\n'
                      '#define IDS_GREETING 10000\n'
                      '#define IDS_BONGO 10001\n'), ''.join(output))

    # Using resource whitelist support.
    grd.SetWhitelistSupportEnabled(True)
    output = rc_header.FormatDefines(grd)
    self.assertEqual(('#define IDR_LOGO '
                      '(::ui::WhitelistedResource<300>(), 300)\n'
                      '#define IDS_GREETING '
                      '(::ui::WhitelistedResource<10000>(), 10000)\n'
                      '#define IDS_BONGO '
                      '(::ui::WhitelistedResource<10001>(), 10001)\n'),
                     ''.join(output))
# Support running this unittest file directly.
if __name__ == '__main__':
  unittest.main()

415
third_party/libwebrtc/tools/grit/grit/format/rc_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,415 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.rc'''
from __future__ import print_function
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import tempfile
import unittest
from six import StringIO
from grit import grd_reader
from grit import util
from grit.node import structure
from grit.tool import build
# Boilerplate that the rc_all builder emits at the top of every generated
# .rc file; the tests below prepend it to their expected output.
_PREAMBLE = '''\
#include "resource.h"
#include <winresrc.h>
#ifdef IDC_STATIC
#undef IDC_STATIC
#endif
#define IDC_STATIC (-1)
'''
class DummyOutput(object):
  '''Minimal stand-in for an <output> node, exposing just the accessors the
  builders need during tests.'''

  def __init__(self, type, language, file='hello.gif'):
    # Attribute names intentionally mirror the accessors below.
    self.type = type
    self.language = language
    self.file = file

  def GetType(self):
    '''Returns the output type string, e.g. 'rc_all'.'''
    return self.type

  def GetLanguage(self):
    '''Returns the output language code, e.g. 'en'.'''
    return self.language

  def GetOutputFilename(self):
    '''Returns the filename this output would be written to.'''
    return self.file
class FormatRcUnittest(unittest.TestCase):
  '''Tests the .rc output formatters via build.RcBuilder.ProcessNode.'''

  def testMessages(self):
    root = util.ParseGrdForUnittest("""
        <messages>
          <message name="IDS_BTN_GO" desc="Button text" meaning="verb">Go!</message>
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="BONGO" desc="Flippo nippo">
            Howdie "Mr. Elephant", how are you doing? '''
          </message>
          <message name="IDS_WITH_LINEBREAKS">
            Good day sir,
            I am a bee
            Sting sting
          </message>
        </messages>
        """)
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    self.assertEqual(_PREAMBLE + u'''\
STRINGTABLE
BEGIN
IDS_BTN_GO "Go!"
IDS_GREETING "Hello %s, how are you doing today?"
BONGO "Howdie ""Mr. Elephant"", how are you doing? "
IDS_WITH_LINEBREAKS "Good day sir,\\nI am a bee\\nSting sting"
END''', output)

  def testRcSection(self):
    root = util.ParseGrdForUnittest(r'''
        <structures>
          <structure type="menu" name="IDC_KLONKMENU" file="grit\testdata\klonk.rc" encoding="utf-16" />
          <structure type="dialog" name="IDD_ABOUTBOX" file="grit\testdata\klonk.rc" encoding="utf-16" />
          <structure type="version" name="VS_VERSION_INFO" file="grit\testdata\klonk.rc" encoding="utf-16" />
        </structures>''')
    root.SetOutputLanguage('en')
    root.RunGatherers()

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = _PREAMBLE + u'''\
IDC_KLONKMENU MENU
BEGIN
POPUP "&File"
BEGIN
MENUITEM "E&xit", IDM_EXIT
MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
POPUP "gonk"
BEGIN
MENUITEM "Klonk && is [good]", ID_GONK_KLONKIS
END
END
POPUP "&Help"
BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END
IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
ICON IDI_KLONK,IDC_MYICON,14,9,20,20
LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
BS_AUTORADIOBUTTON,46,51,84,10
END
VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,0,1
PRODUCTVERSION 1,0,0,1
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x4L
FILETYPE 0x1L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904b0"
BEGIN
VALUE "FileDescription", "klonk Application"
VALUE "FileVersion", "1, 0, 0, 1"
VALUE "InternalName", "klonk"
VALUE "LegalCopyright", "Copyright (C) 2005"
VALUE "OriginalFilename", "klonk.exe"
VALUE "ProductName", " klonk Application"
VALUE "ProductVersion", "1, 0, 0, 1"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1200
END
END'''.strip()

    # Compare token-by-token so the test is insensitive to the exact
    # whitespace alignment the formatter emits.
    for expected_line, output_line in zip(expected.split(), output.split()):
      self.assertEqual(expected_line, output_line)

  def testRcIncludeStructure(self):
    root = util.ParseGrdForUnittest('''
        <structures>
          <structure type="tr_html" name="IDR_HTML" file="bingo.html"/>
          <structure type="tr_html" name="IDR_HTML2" file="bingo2.html"/>
        </structures>''', base_dir='/temp')
    # We do not run gatherers as it is not needed and wouldn't find the file
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
                u'IDR_HTML HTML "%s"\n'
                u'IDR_HTML2 HTML "%s"'
                % (util.normpath('/temp/bingo.html').replace('\\', '\\\\'),
                   util.normpath('/temp/bingo2.html').replace('\\', '\\\\')))
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

  def testRcIncludeFile(self):
    root = util.ParseGrdForUnittest('''
        <includes>
          <include type="TXT" name="TEXT_ONE" file="bingo.txt"/>
          <include type="TXT" name="TEXT_TWO" file="bingo2.txt" filenameonly="true" />
        </includes>''', base_dir='/temp')
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
                u'TEXT_ONE TXT "%s"\n'
                u'TEXT_TWO TXT "%s"'
                % (util.normpath('/temp/bingo.txt').replace('\\', '\\\\'),
                   'bingo2.txt'))
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

  def testRcIncludeFlattenedHtmlFile(self):
    input_file = util.PathFromRoot('grit/testdata/include_test.html')
    output_file = '%s/HTML_FILE1_include_test.html' % tempfile.gettempdir()
    root = util.ParseGrdForUnittest('''
        <includes>
          <include name="HTML_FILE1" flattenhtml="true" file="%s" type="BINDATA" />
        </includes>''' % input_file)

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en', output_file),
                                buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
                u'HTML_FILE1 BINDATA "HTML_FILE1_include_test.html"')
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

    file_contents = util.ReadFile(output_file, 'utf-8')

    # Check for the content added by the <include> tag.
    self.assertIn('Hello Include!', file_contents)
    # Check for the content that was removed by if tag.
    self.assertNotIn('should be removed', file_contents)
    # Check for the content that was kept in place by if.
    self.assertIn('should be kept', file_contents)
    self.assertIn('in the middle...', file_contents)
    self.assertIn('at the end...', file_contents)
    # Check for nested content that was kept
    self.assertIn('nested true should be kept', file_contents)
    self.assertIn('silbing true should be kept', file_contents)
    # Check for removed "<if>" and "</if>" tags.
    self.assertNotIn('<if expr=', file_contents)
    self.assertNotIn('</if>', file_contents)
    os.remove(output_file)

  def testStructureNodeOutputfile(self):
    input_file = util.PathFromRoot('grit/testdata/simple.html')
    root = util.ParseGrdForUnittest('''
        <structures>
          <structure type="tr_html" name="IDR_HTML" file="%s" />
        </structures>''' % input_file)
    struct, = root.GetChildrenOfType(structure.StructureNode)
    # We must run the gatherer since we'll be wanting the translation of the
    # file. The file exists in the location pointed to.
    root.SetOutputLanguage('en')
    root.RunGatherers()

    output_dir = tempfile.gettempdir()
    en_file = struct.FileForLanguage('en', output_dir)
    self.assertEqual(input_file, en_file)
    fr_file = struct.FileForLanguage('fr', output_dir)
    self.assertEqual(os.path.join(output_dir, 'fr_simple.html'), fr_file)

    contents = util.ReadFile(fr_file, 'utf-8')
    self.assertIn('<p>', contents)        # should contain the markup
    self.assertNotIn('Hello!', contents)  # should be translated
    os.remove(fr_file)

  def testChromeHtmlNodeOutputfile(self):
    input_file = util.PathFromRoot('grit/testdata/chrome_html.html')
    output_file = '%s/HTML_FILE1_chrome_html.html' % tempfile.gettempdir()
    root = util.ParseGrdForUnittest('''
        <structures>
          <structure type="chrome_html" name="HTML_FILE1" file="%s" flattenhtml="true" />
        </structures>''' % input_file)
    struct, = root.GetChildrenOfType(structure.StructureNode)
    struct.gatherer.SetDefines({'scale_factors': '2x'})
    # We must run the gatherers since we'll be wanting the chrome_html output.
    # The file exists in the location pointed to.
    root.SetOutputLanguage('en')
    root.RunGatherers()

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en', output_file),
                                buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
                u'HTML_FILE1 BINDATA "HTML_FILE1_chrome_html.html"')
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

    file_contents = util.ReadFile(output_file, 'utf-8')

    # Check for the content added by the <include> tag.
    self.assertIn('Hello Include!', file_contents)
    # Check for inserted -webkit-image-set.
    self.assertIn('content: -webkit-image-set', file_contents)
    os.remove(output_file)

  def testSubstitutionHtml(self):
    input_file = util.PathFromRoot('grit/testdata/toolbar_about.html')
    root = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="1" allow_pseudo="False">
          <structures fallback_to_english="True">
            <structure type="tr_html" name="IDR_HTML" file="%s" expand_variables="true"/>
          </structures>
        </release>
      </grit>
      ''' % input_file), util.PathFromRoot('.'))
    root.SetOutputLanguage('ar')
    # We must run the gatherers since we'll be wanting the translation of the
    # file. The file exists in the location pointed to.
    root.RunGatherers()

    output_dir = tempfile.gettempdir()
    struct, = root.GetChildrenOfType(structure.StructureNode)
    ar_file = struct.FileForLanguage('ar', output_dir)
    self.assertEqual(os.path.join(output_dir, 'ar_toolbar_about.html'),
                     ar_file)

    contents = util.ReadFile(ar_file, 'utf-8')
    # Expanding variables for 'ar' should have flipped the direction.
    self.assertIn('dir="RTL"', contents)
    os.remove(ar_file)

  def testFallbackToEnglish(self):
    root = util.ParseGrdForUnittest(r'''
        <structures fallback_to_english="True">
          <structure type="dialog" name="IDD_ABOUTBOX" file="grit\testdata\klonk.rc" encoding="utf-16" />
        </structures>''', base_dir=util.PathFromRoot('.'))
    root.SetOutputLanguage('en')
    root.RunGatherers()

    buf = StringIO()
    # 'bingobongo' has no translations, so the English dialog is emitted.
    build.RcBuilder.ProcessNode(
        root, DummyOutput('rc_all', 'bingobongo'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    self.assertEqual(_PREAMBLE + '''\
IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
ICON IDI_KLONK,IDC_MYICON,14,9,20,20
LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
BS_AUTORADIOBUTTON,46,51,84,10
END''', output)

  def testSubstitutionRc(self):
    root = grd_reader.Parse(StringIO(r'''<?xml version="1.0" encoding="UTF-8"?>
    <grit latest_public_release="2" source_lang_id="en-US" current_release="3"
        base_dir=".">
      <outputs>
        <output lang="en" type="rc_all" filename="grit\testdata\klonk_resources.rc"/>
      </outputs>
      <release seq="1" allow_pseudo="False">
        <structures>
          <structure type="menu" name="IDC_KLONKMENU"
              file="grit\testdata\klonk.rc" encoding="utf-16"
              expand_variables="true" />
        </structures>
        <messages>
          <message name="good" sub_variable="true">
            excellent
          </message>
        </messages>
      </release>
    </grit>
    '''), util.PathFromRoot('.'))
    root.SetOutputLanguage('en')
    root.RunGatherers()

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = buf.getvalue()
    # '[good]' in the source menu is replaced by the sub_variable message.
    self.assertEqual('''
// This file is automatically generated by GRIT. Do not edit.
#include "resource.h"
#include <winresrc.h>
#ifdef IDC_STATIC
#undef IDC_STATIC
#endif
#define IDC_STATIC (-1)
LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
IDC_KLONKMENU MENU
BEGIN
POPUP "&File"
BEGIN
MENUITEM "E&xit", IDM_EXIT
MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
POPUP "gonk"
BEGIN
MENUITEM "Klonk && is excellent", ID_GONK_KLONKIS
END
END
POPUP "&Help"
BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END
STRINGTABLE
BEGIN
good "excellent"
END
'''.strip(), output.strip())
# Support running this unittest file directly.
if __name__ == '__main__':
  unittest.main()

159
third_party/libwebrtc/tools/grit/grit/format/resource_map.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,159 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This file contains item formatters for resource_map_header and
resource_map_source files. A resource map is a mapping between resource names
(string) and the internal resource ID.'''
from __future__ import print_function
import os
from functools import partial
from grit import util
def GetFormatter(type):
  '''Returns the formatter callable for the given resource-map output type,
  or None when |type| is not a resource-map type.'''
  dispatch = {
      'resource_map_header': _FormatHeader,
      # The source variants differ only in how each entry's key is derived.
      'resource_file_map_source': partial(_FormatSource, _GetItemPath),
      'resource_map_source': partial(_FormatSource, _GetItemName),
  }
  return dispatch.get(type)
def GetMapName(root):
  '''Gets the name of the resource map based on the rc_header file name.

  E.g., if the rc_header output is named theme_resources.h, the resource map
  is named kThemeResources.

  Args:
    root: the grd file root node.

  Raises:
    Exception: if the .grd file declares no rc_header output.
  '''
  rc_header_file = None
  # When several rc_header outputs exist the last one wins (historical
  # behavior of this formatter).
  for output in root.GetOutputFiles():
    if 'rc_header' == output.GetType():
      rc_header_file = output.GetFilename()
  if not rc_header_file:
    raise Exception('unable to find resource header filename')

  filename = os.path.splitext(os.path.split(rc_header_file)[1])[0]
  # CamelCase the basename: capitalize the first letter and the letter
  # following each underscore, dropping the underscores themselves.
  filename = filename[0].upper() + filename[1:]
  while True:
    pos = filename.find('_')
    if pos == -1:
      break
    if pos + 1 >= len(filename):
      # A trailing underscore has no following letter to capitalize; just
      # drop it.  (The previous code raised IndexError in this case.)
      filename = filename[:pos]
      break
    filename = filename[:pos] + filename[pos + 1].upper() + filename[pos + 2:]
  return 'k' + filename
def _FormatHeader(root, lang='en', output_dir='.'):
  '''Create the header file for the resource mapping. This file just declares
  an array of name/value pairs.'''
  # lang/output_dir are part of the common formatter signature; the header
  # content only depends on the map name derived from |root|.
  return '''\
// This file is automatically generated by GRIT. Do not edit.
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap %(map_name)s[];
extern const size_t %(map_name)sSize;
''' % { 'map_name': GetMapName(root) }
def _FormatSourceHeader(root, output_dir):
  '''Create the header of the C++ source file for the resource mapping.'''
  # Locate both required outputs; the generated #includes are written
  # relative to |output_dir|.
  rc_header_file = None
  map_header_file = None
  for output in root.GetOutputFiles():
    type = output.GetType()
    if 'rc_header' == type:
      rc_header_file = util.MakeRelativePath(output_dir,
                                             output.GetOutputFilename())
    elif 'resource_map_header' == type:
      map_header_file = util.MakeRelativePath(output_dir,
                                              output.GetOutputFilename())
  if not rc_header_file or not map_header_file:
    raise Exception('resource_map_source output type requires '
                    'a resource_map_header and rc_header outputs')
  return '''\
// This file is automatically generated by GRIT. Do not edit.
#include "%(map_header_file)s"
#include <stddef.h>
#include "base/stl_util.h"
#include "%(rc_header_file)s"
const GritResourceMap %(map_name)s[] = {
''' % { 'map_header_file': map_header_file,
        'rc_header_file': rc_header_file,
        'map_name': GetMapName(root),
      }
def _FormatSourceFooter(root):
  '''Closes the map array and emits its size constant.'''
  # Return the footer text.
  return '''\
};
const size_t %(map_name)sSize = base::size(%(map_name)s);
''' % { 'map_name': GetMapName(root) }
def _FormatSource(get_key, root, lang, output_dir):
  '''Generator yielding the C++ source of a resource map.

  Args:
    get_key: function mapping an active node to its map key (resource name
        or input path; see _GetItemName/_GetItemPath).
    root: the grd file root node.
    lang, output_dir: standard formatter parameters.
  '''
  id_map = root.GetIdMap()
  yield _FormatSourceHeader(root, output_dir)
  seen = set()
  for item in root.ActiveDescendants():
    if not item.IsResourceMapSource():
      continue
    key = get_key(item)
    tid = item.attrs['name']
    # Skip names that never received an id and duplicate keys (e.g. the same
    # resource listed in several conditional branches).
    if tid not in id_map or key in seen:
      continue
    seen.add(key)
    yield ' {"%s", %s},\n' % (key, tid)
  yield _FormatSourceFooter(root)
def _GetItemName(item):
  '''Returns the item's resource name ('name' attribute) as the map key.'''
  return item.attrs['name']
# Check if |path2| is a subpath of |path1|.
def _IsSubpath(path1, path2):
path1_abs = os.path.abspath(path1)
path2_abs = os.path.abspath(path2)
common = os.path.commonprefix([path1_abs, path2_abs])
return path1_abs == common
def _GetItemPath(item):
  '''Returns the item's input file path (forward slashes) as the map key.'''
  path = item.GetInputPath().replace("\\", "/")
  # Handle the case where the file resides within the output folder,
  # by expanding any variables as well as replacing the output folder name with
  # a fixed string such that the key added to the map does not depend on a given
  # developer's setup.
  #
  # For example this will convert the following path:
  # ../../out/gchrome/${root_gen_dir}/ui/webui/resources/js/foo.js
  # to:
  # @out_folder@/gen/ui/webui/resources/js/foo.js
  real_path = item.ToRealPath(item.GetInputPath())
  if (item.attrs.get('use_base_dir', 'true') != 'true' and
      _IsSubpath(os.path.curdir, real_path)):
    path = os.path.join(
        '@out_folder@', os.path.relpath(real_path)).replace("\\", "/")
  # Variables like ${root_gen_dir} must have been expanded by ToRealPath.
  assert '$' not in path, 'all variables should have been expanded'
  return path

Просмотреть файл

@ -0,0 +1,345 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.resource_map'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.format import resource_map
class FormatResourceMapUnittest(unittest.TestCase):
def testFormatResourceMap(self):
grd = util.ParseGrdForUnittest('''
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="menu" name="IDC_KLONKMENU"
file="grit\\testdata\\klonk.rc" encoding="utf-16" />
</structures>
<includes first_id="10000">
<include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
<if expr="False">
<include type="foo" file="def" name="IDS_MISSING" />
</if>
<if expr="lang != 'es'">
<include type="foo" file="ghi" name="IDS_LANGUAGESPECIFIC" />
</if>
<if expr="lang == 'es'">
<include type="foo" file="jkl" name="IDS_LANGUAGESPECIFIC" />
</if>
<include type="foo" file="mno" name="IDS_THIRDPRESENT" />
<include type="foo" file="opq" name="IDS_FOURTHPRESENT"
skip_in_resource_map="true" />
</includes>
</release>''', run_gatherers=True)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDC_KLONKMENU", IDC_KLONKMENU},
{"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
{"IDS_LANGUAGESPECIFIC", IDS_LANGUAGESPECIFIC},
{"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"grit/testdata/klonk.rc", IDC_KLONKMENU},
{"abc", IDS_FIRSTPRESENT},
{"ghi", IDS_LANGUAGESPECIFIC},
{"mno", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
def testFormatResourceMapWithGeneratedFile(self):
os.environ["root_gen_dir"] = "gen"
grd = util.ParseGrdForUnittest('''\
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="resource_map_header.h" />
</outputs>
<release seq="3">
<includes first_id="10000">
<include type="BINDATA"
file="${root_gen_dir}/foo/bar/baz.js"
name="IDR_FOO_BAR_BAZ_JS"
use_base_dir="false"
compress="gzip" />
</includes>
</release>''', run_gatherers=True)
formatter = resource_map.GetFormatter('resource_file_map_source')
output = util.StripBlankLinesAndComments(''.join(formatter(grd, 'en', '.')))
expected = '''\
#include "resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"@out_folder@/gen/foo/bar/baz.js", IDR_FOO_BAR_BAZ_JS},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);'''
self.assertEqual(expected, output)
def testFormatResourceMapWithOutputAllEqualsFalseForStructures(self):
grd = util.ParseGrdForUnittest('''
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
<output type="resource_map_source"
filename="the_resource_map_header.cc" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="chrome_scaled_image" name="IDR_KLONKMENU"
file="foo.png" />
<if expr="False">
<structure type="chrome_scaled_image" name="IDR_MISSING"
file="bar.png" />
</if>
<if expr="True">
<structure type="chrome_scaled_image" name="IDR_BLOB"
file="blob.png" />
</if>
<if expr="True">
<then>
<structure type="chrome_scaled_image" name="IDR_METEOR"
file="meteor.png" />
</then>
<else>
<structure type="chrome_scaled_image" name="IDR_METEOR"
file="roetem.png" />
</else>
</if>
<if expr="False">
<structure type="chrome_scaled_image" name="IDR_LAST"
file="zyx.png" />
</if>
<if expr="True">
<structure type="chrome_scaled_image" name="IDR_LAST"
file="xyz.png" />
</if>
</structures>
</release>''', run_gatherers=True)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDR_KLONKMENU", IDR_KLONKMENU},
{"IDR_BLOB", IDR_BLOB},
{"IDR_METEOR", IDR_METEOR},
{"IDR_LAST", IDR_LAST},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDR_KLONKMENU", IDR_KLONKMENU},
{"IDR_BLOB", IDR_BLOB},
{"IDR_METEOR", IDR_METEOR},
{"IDR_LAST", IDR_LAST},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
  def testFormatResourceMapWithOutputAllEqualsFalseForIncludes(self):
    '''Checks that <include> nodes under a false <if> condition are excluded
    from the generated resource maps, while nodes under true conditions
    (including <then>/<else> branches) are kept.
    '''
    grd = util.ParseGrdForUnittest('''
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header"
                  filename="the_resource_map_header.h" />
        </outputs>
        <release seq="3">
          <structures first_id="300">
            <structure type="menu" name="IDC_KLONKMENU"
                       file="grit\\testdata\\klonk.rc" encoding="utf-16" />
          </structures>
          <includes first_id="10000">
            <include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
            <if expr="False">
              <include type="foo" file="def" name="IDS_MISSING" />
            </if>
            <include type="foo" file="mno" name="IDS_THIRDPRESENT" />
            <if expr="True">
              <include type="foo" file="blob" name="IDS_BLOB" />
            </if>
            <if expr="True">
              <then>
                <include type="foo" file="meteor" name="IDS_METEOR" />
              </then>
              <else>
                <include type="foo" file="roetem" name="IDS_METEOR" />
              </else>
            </if>
            <if expr="False">
              <include type="foo" file="zyx" name="IDS_LAST" />
            </if>
            <if expr="True">
              <include type="foo" file="xyz" name="IDS_LAST" />
            </if>
          </includes>
        </release>''', run_gatherers=True)
    # The header output is unaffected by which includes are active.
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    # The name->id map must list only includes from true branches
    # (IDS_MISSING and the first IDS_LAST are dropped).
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDC_KLONKMENU", IDC_KLONKMENU},
  {"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
  {"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
  {"IDS_BLOB", IDS_BLOB},
  {"IDS_METEOR", IDS_METEOR},
  {"IDS_LAST", IDS_LAST},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
    # The file->id map uses the source file path as the key instead of the
    # resource name; the same filtering applies.
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"grit/testdata/klonk.rc", IDC_KLONKMENU},
  {"abc", IDS_FIRSTPRESENT},
  {"mno", IDS_THIRDPRESENT},
  {"blob", IDS_BLOB},
  {"meteor", IDS_METEOR},
  {"xyz", IDS_LAST},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
  def testFormatStringResourceMap(self):
    '''Checks resource map generation for <message> nodes, including that
    messages under a false <if> condition are excluded from the map.
    '''
    grd = util.ParseGrdForUnittest('''
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header" filename="the_rc_map_header.h" />
          <output type="resource_map_source" filename="the_rc_map_source.cc" />
        </outputs>
        <release seq="1" allow_pseudo="false">
          <messages fallback_to_english="true">
            <message name="IDS_PRODUCT_NAME" desc="The application name">
              Application
            </message>
            <if expr="True">
              <message name="IDS_DEFAULT_TAB_TITLE_TITLE_CASE"
                  desc="In Title Case: The default title in a tab.">
                New Tab
              </message>
            </if>
            <if expr="False">
              <message name="IDS_DEFAULT_TAB_TITLE"
                  desc="The default title in a tab.">
                New tab
              </message>
            </if>
          </messages>
        </release>''', run_gatherers=True)
    grd.InitializeIds()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    # Only the two messages in active branches appear in the map;
    # IDS_DEFAULT_TAB_TITLE (under expr="False") is dropped.
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_rc_map_header.h"
#include <stddef.h>
#include "base/stl_util.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDS_PRODUCT_NAME", IDS_PRODUCT_NAME},
  {"IDS_DEFAULT_TAB_TITLE_TITLE_CASE", IDS_DEFAULT_TAB_TITLE_TITLE_CASE},
};
const size_t kTheRcHeaderSize = base::size(kTheRcHeader);''', output)
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

8
third_party/libwebrtc/tools/grit/grit/gather/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Module grit.gather
'''
pass

62
third_party/libwebrtc/tools/grit/grit/gather/admin_template.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,62 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Gatherer for administrative template files.
'''
from __future__ import print_function
import re
from grit.gather import regexp
from grit import exception
from grit import lazy_re
class MalformedAdminTemplateException(exception.Base):
  '''This file doesn't look like a .adm file to me.'''
  # NOTE: the redundant "pass" after the docstring was removed; the docstring
  # alone is a sufficient class body.
class AdmGatherer(regexp.RegexpGatherer):
  '''Gatherer for the translateable portions of an admin template.

  This gatherer currently makes the following assumptions:
  - there is only one [strings] section and it is always the last section
    of the file
  - translateable strings do not need to be escaped.
  '''

  # Finds the strings section as the group named 'strings'
  _STRINGS_SECTION = lazy_re.compile(
      r'(?P<first_part>.+^\[strings\])(?P<strings>.+)\Z',
      re.MULTILINE | re.DOTALL)

  # Finds the translateable sections from within the [strings] section.
  _TRANSLATEABLES = lazy_re.compile(
      r'^\s*[A-Za-z0-9_]+\s*=\s*"(?P<text>.+)"\s*$',
      re.MULTILINE)

  def Escape(self, text):
    # .adm values are single-line; encode embedded newlines as a literal
    # backslash-n sequence.
    return text.replace('\n', '\\n')

  def UnEscape(self, text):
    # Inverse of Escape(): decode literal backslash-n back to a newline.
    return text.replace('\\n', '\n')

  def Parse(self):
    '''Splits the file into a nontranslateable prefix and the [strings]
    section, then extracts translateable values from the latter.

    Raises:
      MalformedAdminTemplateException: if no [strings] section is found.
    '''
    if self.have_parsed_:
      # Parsing is idempotent; only do the work once.
      return
    self.have_parsed_ = True

    self.text_ = self._LoadInputFile().strip()
    m = self._STRINGS_SECTION.match(self.text_)
    if not m:
      raise MalformedAdminTemplateException()
    # Add the first part, which is all nontranslateable, to the skeleton
    self._AddNontranslateableChunk(m.group('first_part'))
    # Then parse the rest using the _TRANSLATEABLES regexp.
    self._RegExpParse(self._TRANSLATEABLES, m.group('strings'))

  def GetTextualIds(self):
    return [self.extkey]

Просмотреть файл

@ -0,0 +1,115 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the admin template gatherer.'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import admin_template
from grit import util
from grit import grd_reader
from grit import grit_runner
from grit.tool import build
class AdmGathererUnittest(unittest.TestCase):
  '''Unit tests for the AdmGatherer admin-template gatherer.

  NOTE: the deprecated unittest aliases failUnless/failUnlessEqual (removed
  in Python 3.12) were replaced with their modern equivalents, which also
  produce more informative failure messages.
  '''

  def testParsingAndTranslating(self):
    # Two translateable values after the [strings] marker; embedded quotes
    # inside a value must survive parsing.
    pseudofile = StringIO(
        'bingo bongo\n'
        'ding dong\n'
        '[strings] \n'
        'whatcha="bingo bongo"\n'
        'gotcha = "bingolabongola "the wise" fingulafongula" \n')
    gatherer = admin_template.AdmGatherer(pseudofile)
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 2)
    self.assertEqual(gatherer.GetCliques()[1].GetMessage().GetRealContent(),
                     'bingolabongola "the wise" fingulafongula')
    # Translating back to the source language must round-trip the text.
    translation = gatherer.Translate('en')
    self.assertEqual(translation, gatherer.GetText().strip())

  def testErrorHandling(self):
    # Without a [strings] section the gatherer must reject the input.
    pseudofile = StringIO(
        'bingo bongo\n'
        'ding dong\n'
        'whatcha="bingo bongo"\n'
        'gotcha = "bingolabongola "the wise" fingulafongula" \n')
    gatherer = admin_template.AdmGatherer(pseudofile)
    self.assertRaises(admin_template.MalformedAdminTemplateException,
                      gatherer.Parse)

  _TRANSLATABLES_FROM_FILE = (
      'Google', 'Google Desktop', 'Preferences',
      'Controls Google Desktop preferences',
      'Indexing and Capture Control',
      'Controls what files, web pages, and other content will be indexed by Google Desktop.',
      'Prevent indexing of email',
      # there are lots more but we don't check any further
  )

  def VerifyCliquesFromAdmFile(self, cliques):
    # Spot-check the first few extracted messages against known content.
    self.assertGreater(len(cliques), 20)
    for clique, expected in zip(cliques, self._TRANSLATABLES_FROM_FILE):
      text = clique.GetMessage().GetRealContent()
      self.assertEqual(text, expected)

  def testFromFile(self):
    fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm')
    gatherer = admin_template.AdmGatherer(fname)
    gatherer.Parse()
    cliques = gatherer.GetCliques()
    self.VerifyCliquesFromAdmFile(cliques)

  def MakeGrd(self):
    # Builds a minimal .grd with one .adm structure and one .txt structure,
    # both excluded from the rc output but still emitted as files.
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3">
          <release seq="3">
            <structures>
              <structure type="admin_template" name="IDAT_GOOGLE_DESKTOP_SEARCH"
                  file="GoogleDesktop.adm" exclude_from_rc="true" />
              <structure type="txt" name="BINGOBONGO"
                  file="README.txt" exclude_from_rc="true" />
            </structures>
          </release>
          <outputs>
            <output filename="de_res.rc" type="rc_all" lang="de" />
          </outputs>
        </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    return grd

  def testInGrd(self):
    grd = self.MakeGrd()
    cliques = grd.children[0].children[0].children[0].GetCliques()
    self.VerifyCliquesFromAdmFile(cliques)

  def testFileIsOutput(self):
    grd = self.MakeGrd()
    dirname = util.TempDir({})
    try:
      tool = build.RcBuilder()
      tool.o = grit_runner.Options()
      tool.output_directory = dirname.GetPath()
      tool.res = grd
      tool.Process()
      # Both structures must be written out as language-prefixed files.
      self.assertTrue(os.path.isfile(dirname.GetPath('de_GoogleDesktop.adm')))
      self.assertTrue(os.path.isfile(dirname.GetPath('de_README.txt')))
    finally:
      dirname.CleanUp()
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

377
third_party/libwebrtc/tools/grit/grit/gather/chrome_html.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,377 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepares a Chrome HTML file by inlining resources and adding references to
high DPI resources and removing references to unsupported scale factors.
This is a small gatherer that takes a HTML file, looks for src attributes
and inlines the specified file, producing one HTML file with no external
dependencies. It recursively inlines the included files. When inlining CSS
image files this script also checks for the existence of high DPI versions
of the inlined file including those on relevant platforms. Unsupported scale
factors are also removed from existing image sets to support explicitly
referencing all available images.
"""
from __future__ import print_function
import os
import re
from grit import lazy_re
from grit import util
from grit.format import html_inline
from grit.gather import interface
# Distribution string to replace with distribution.
DIST_SUBSTR = '%DISTRIBUTION%'

# Matches a chrome theme source URL.
_THEME_SOURCE = lazy_re.compile(
    r'(?P<baseurl>chrome://theme/IDR_[A-Z0-9_]*)(?P<query>\?.*)?')

# Pattern for matching CSS url() function. The quote group matches an
# optional single or double quote and (?P=quote) requires the closing
# quote to be the same character (or also absent).
_CSS_URL_PATTERN = r'url\((?P<quote>"|\'|)(?P<filename>[^"\'()]*)(?P=quote)\)'

# Matches CSS url() functions with the capture group 'filename'.
_CSS_URL = lazy_re.compile(_CSS_URL_PATTERN)

# Matches one or more CSS image urls used in given properties
# (content, background, or any *-image property).
_CSS_IMAGE_URLS = lazy_re.compile(
    r'(?P<attribute>content|background|[\w-]*-image):\s*'
    r'(?P<urls>(' + _CSS_URL_PATTERN + r'\s*,?\s*)+)')

# Matches CSS image sets.
_CSS_IMAGE_SETS = lazy_re.compile(
    r'(?P<attribute>content|background|[\w-]*-image):[ ]*'
    r'-webkit-image-set\((?P<images>'
    r'(\s*,?\s*url\((?P<quote>"|\'|)[^"\'()]*(?P=quote)\)[ ]*[0-9.]*x)*)\)',
    re.MULTILINE)

# Matches a single image in a CSS image set with the capture group scale.
_CSS_IMAGE_SET_IMAGE = lazy_re.compile(r'\s*,?\s*'
    r'url\((?P<quote>"|\'|)[^"\'()]*(?P=quote)\)[ ]*(?P<scale>[0-9.]*x)',
    re.MULTILINE)

# Matches an <img> tag with the src attribute captured as 'filename'.
_HTML_IMAGE_SRC = lazy_re.compile(
    r'<img[^>]+src=\"(?P<filename>[^">]*)\"[^>]*>')
def GetImageList(
    base_path, filename, scale_factors, distribution,
    filename_expansion_function=None):
  """Generate the list of images which match the provided scale factors.

  Takes an image filename and checks for files of the same name in folders
  corresponding to the supported scale factors. If the file is from a
  chrome://theme/ source, inserts supported @Nx scale factors as high DPI
  versions.

  Args:
    base_path: path to look for relative file paths in
    filename: name of the base image file
    scale_factors: a list of the supported scale factors (i.e. ['2x'])
    distribution: string that should replace %DISTRIBUTION%
    filename_expansion_function: optional callable applied to the filename
        after distribution substitution (e.g. grd variable expansion)

  Returns:
    array of tuples containing scale factor and image (i.e.
    [('1x', 'image.png'), ('2x', '2x/image.png')]).
  """
  # Any matches for which a chrome URL handler will serve all scale factors
  # can simply request all scale factors.
  theme_match = _THEME_SOURCE.match(filename)
  if theme_match:
    images = [('1x', filename)]
    for scale_factor in scale_factors:
      scale_filename = "%s@%s" % (theme_match.group('baseurl'), scale_factor)
      if theme_match.group('query'):
        scale_filename += theme_match.group('query')
      images.append((scale_factor, scale_filename))
    return images

  if filename.find(':') != -1:
    # filename is probably a URL, only return filename itself.
    return [('1x', filename)]

  filename = filename.replace(DIST_SUBSTR, distribution)
  if filename_expansion_function:
    filename = filename_expansion_function(filename)

  images = [('1x', filename)]
  # The directory/basename split is loop-invariant, so compute it once
  # instead of once per scale factor. (An unused 'filepath' local was also
  # removed here.)
  dir_part, base_part = os.path.split(os.path.join(base_path, filename))
  for scale_factor in scale_factors:
    # Check for existence of file and add to image set.
    scale_image_path = os.path.join(dir_part, scale_factor, base_part)
    if os.path.isfile(scale_image_path):
      # HTML/CSS always uses forward slashed paths.
      parts = filename.rsplit('/', 1)
      if len(parts) == 1:
        path = ''
      else:
        path = parts[0] + '/'
      scale_image_name = path + scale_factor + '/' + parts[-1]
      images.append((scale_factor, scale_image_name))
  return images
def GenerateImageSet(images, quote):
  """Generates a -webkit-image-set for the provided list of images.

  Args:
    images: an array of tuples giving scale factor and file path
        (i.e. [('1x', 'image.png'), ('2x', '2x/image.png')]).
    quote: a string giving the quotation character to use (i.e. "'")

  Returns:
    string giving a -webkit-image-set rule referencing the provided images.
    (i.e. '-webkit-image-set(url('image.png') 1x, url('2x/image.png') 2x)')
  """
  # Render each (scale, path) pair as "url(<quoted path>) <scale>".
  entries = ["url(%s%s%s) %s" % (quote, path, quote, scale)
             for (scale, path) in images]
  return "-webkit-image-set(%s)" % ", ".join(entries)
def UrlToImageSet(
    src_match, base_path, scale_factors, distribution,
    filename_expansion_function=None):
  """Regex replace function which replaces url() with -webkit-image-set.

  Takes a regex match for url('path'). If the file is local, checks for
  files of the same name in folders corresponding to the supported scale
  factors. If the file is from a chrome://theme/ source, inserts the
  supported @Nx scale factor request. In either case inserts a
  -webkit-image-set rule to fetch the appropriate image for the current
  scale factor.

  Args:
    src_match: regex match object from _CSS_URLS
    base_path: path to look for relative file paths in
    scale_factors: a list of the supported scale factors (i.e. ['2x'])
    distribution: string that should replace %DISTRIBUTION%.

  Returns:
    string
  """
  image_list = GetImageList(
      base_path, src_match.group('filename'), scale_factors, distribution,
      filename_expansion_function=filename_expansion_function)
  if len(image_list) == 1:
    # Only the base image exists: leave the original url() untouched.
    return src_match.group(0)
  return GenerateImageSet(image_list, src_match.group('quote'))
def InsertImageSet(
    src_match, base_path, scale_factors, distribution,
    filename_expansion_function=None):
  """Regex replace function which inserts -webkit-image-set rules.

  Takes a regex match for `property: url('path')[, url('path')]+`.
  Replaces one or more occurances of the match with image set rules.

  Args:
    src_match: regex match object from _CSS_IMAGE_URLS
    base_path: path to look for relative file paths in
    scale_factors: a list of the supported scale factors (i.e. ['2x'])
    distribution: string that should replace %DISTRIBUTION%.

  Returns:
    string
  """
  def _ReplaceUrl(url_match):
    # Each individual url() in the value gets its own image-set expansion.
    return UrlToImageSet(url_match, base_path, scale_factors, distribution,
                         filename_expansion_function)

  rewritten_urls = _CSS_URL.sub(_ReplaceUrl, src_match.group('urls'))
  return "%s: %s" % (src_match.group('attribute'), rewritten_urls)
def InsertImageStyle(
    src_match, base_path, scale_factors, distribution,
    filename_expansion_function=None):
  """Regex replace function which adds a content style to an <img>.

  Takes a regex match from _HTML_IMAGE_SRC and replaces the attribute with a
  CSS style which defines the image set.
  """
  whole_tag = src_match.group(0)
  image_list = GetImageList(
      base_path, src_match.group('filename'), scale_factors, distribution,
      filename_expansion_function=filename_expansion_function)
  # Don't modify the source if there is only one image or image already
  # defines a style.
  if whole_tag.find(" style=\"") != -1 or len(image_list) == 1:
    return whole_tag
  # Re-open the tag (drop the trailing '>') and append the style attribute.
  return "%s style=\"content: %s;\">" % (whole_tag[:-1],
                                         GenerateImageSet(image_list, "'"))
def InsertImageSets(
    filepath, text, scale_factors, distribution,
    filename_expansion_function=None):
  """Helper function that adds references to external images available in any
  of scale_factors in CSS backgrounds.
  """
  # First rewrite <img src="..."> tags into styled tags, then expand CSS
  # image properties (content, background, *-image) over the result.
  with_img_styles = _HTML_IMAGE_SRC.sub(
      lambda m: InsertImageStyle(
          m, filepath, scale_factors, distribution,
          filename_expansion_function=filename_expansion_function),
      text)
  return _CSS_IMAGE_URLS.sub(
      lambda m: InsertImageSet(
          m, filepath, scale_factors, distribution,
          filename_expansion_function=filename_expansion_function),
      with_img_styles)
def RemoveImagesNotIn(scale_factors, src_match):
  """Regex replace function which removes images for scale factors not in
  scale_factors.

  Takes a regex match for _CSS_IMAGE_SETS. For each image in the group
  images, checks if this scale factor is in scale_factors and if not,
  removes it.

  Args:
    scale_factors: a list of the supported scale factors (i.e. ['1x', '2x'])
    src_match: regex match object from _CSS_IMAGE_SETS

  Returns:
    string
  """
  def _KeepIfSupported(image_match):
    # Keep the entry verbatim when its scale is supported, drop it otherwise.
    if image_match.group('scale') in scale_factors:
      return image_match.group(0)
    return ''

  kept_images = _CSS_IMAGE_SET_IMAGE.sub(_KeepIfSupported,
                                         src_match.group('images'))
  return "%s: -webkit-image-set(%s)" % (src_match.group('attribute'),
                                        kept_images)
def RemoveImageSetImages(text, scale_factors):
  """Helper function which removes images in image sets not in the list of
  supported scale_factors.
  """
  def _FilterImageSet(image_set_match):
    return RemoveImagesNotIn(scale_factors, image_set_match)

  return _CSS_IMAGE_SETS.sub(_FilterImageSet, text)
def ProcessImageSets(
    filepath, text, scale_factors, distribution,
    filename_expansion_function=None):
  """Helper function that adds references to external images available in
  other scale_factors and removes images from image-sets in unsupported
  scale_factors.
  """
  # Explicitly add 1x to supported scale factors so that it is not removed.
  supported_scale_factors = ['1x'] + list(scale_factors)
  stripped_text = RemoveImageSetImages(text, supported_scale_factors)
  return InsertImageSets(
      filepath, stripped_text, scale_factors, distribution,
      filename_expansion_function=filename_expansion_function)
class ChromeHtml(interface.GathererBase):
  """Represents an HTML document processed for Chrome WebUI.

  HTML documents used in Chrome WebUI have local resources inlined and
  automatically insert references to high DPI assets used in CSS properties
  with the use of the -webkit-image-set value. References to unsupported
  scale factors in image sets are also removed. This does not generate any
  translateable messages and instead generates a single DataPack resource.
  """

  def __init__(self, *args, **kwargs):
    super(ChromeHtml, self).__init__(*args, **kwargs)
    # Whether the document may keep external <script> references.
    self.allow_external_script_ = False
    # Whether to inline (flatten) referenced resources into one document.
    self.flatten_html_ = False
    # Whether to run only preprocessing instead of full inlining.
    self.preprocess_only_ = False
    # 1x resources are implicitly already in the source and do not need to be
    # added.
    self.scale_factors_ = []
    self.filename_expansion_function = None

  def SetAttributes(self, attrs):
    # Attribute values come from the grd XML as strings; anything other than
    # the literal 'true' counts as false.
    self.allow_external_script_ = ('allowexternalscript' in attrs and
                                   attrs['allowexternalscript'] == 'true')
    self.preprocess_only_ = ('preprocess' in attrs and
                             attrs['preprocess'] == 'true')
    # preprocess=true implies flattening.
    self.flatten_html_ = (self.preprocess_only_ or ('flattenhtml' in attrs and
                                                    attrs['flattenhtml'] == 'true'))

  def SetDefines(self, defines):
    # scale_factors is a comma-separated list, e.g. '1.4x,1.8x'.
    if 'scale_factors' in defines:
      self.scale_factors_ = defines['scale_factors'].split(',')

  def GetText(self):
    """Returns inlined text of the HTML document."""
    return self.inlined_text_

  def GetTextualIds(self):
    return [self.extkey]

  def GetData(self, lang, encoding):
    """Returns inlined text of the HTML document."""
    ret = self.inlined_text_
    if encoding == util.BINARY:
      ret = ret.encode('utf-8')
    return ret

  def GetHtmlResourceFilenames(self):
    """Returns a set of all filenames inlined by this file."""
    if self.flatten_html_:
      return html_inline.GetResourceFilenames(
          self.grd_node.ToRealPath(self.GetInputPath()),
          self.grd_node,
          allow_external_script=self.allow_external_script_,
          rewrite_function=lambda fp, t, d: ProcessImageSets(
              fp, t, self.scale_factors_, d,
              filename_expansion_function=self.filename_expansion_function),
          filename_expansion_function=self.filename_expansion_function)
    # Nothing is inlined when flattening is disabled.
    return []

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    """Returns this document translated."""
    # ChromeHtml produces no translateable messages, so the "translation"
    # is simply the processed document itself.
    return self.inlined_text_

  def SetFilenameExpansionFunction(self, fn):
    self.filename_expansion_function = fn

  def Parse(self):
    """Parses and inlines the represented file."""
    filename = self.GetInputPath()
    # If there is a grd_node, prefer its GetInputPath(), as that may do more
    # processing to make the call to ToRealPath() below work correctly.
    if self.grd_node:
      filename = self.grd_node.GetInputPath()
    if self.filename_expansion_function:
      filename = self.filename_expansion_function(filename)
    # Hack: some unit tests supply an absolute path and no root node.
    if not os.path.isabs(filename):
      filename = self.grd_node.ToRealPath(filename)
    if self.flatten_html_:
      self.inlined_text_ = html_inline.InlineToString(
          filename,
          self.grd_node,
          allow_external_script = self.allow_external_script_,
          strip_whitespace=True,
          preprocess_only = self.preprocess_only_,
          rewrite_function=lambda fp, t, d: ProcessImageSets(
              fp, t, self.scale_factors_, d,
              filename_expansion_function=self.filename_expansion_function),
          filename_expansion_function=self.filename_expansion_function)
    else:
      # Without flattening, only the image-set processing is applied to the
      # raw file contents.
      distribution = html_inline.GetDistribution()
      self.inlined_text_ = ProcessImageSets(
          os.path.dirname(filename),
          util.ReadFile(filename, 'utf-8'),
          self.scale_factors_,
          distribution,
          filename_expansion_function=self.filename_expansion_function)

Просмотреть файл

@ -0,0 +1,610 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.chrome_html'''
from __future__ import print_function
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import lazy_re
from grit import util
from grit.gather import chrome_html
_NEW_LINE = lazy_re.compile('(\r\n|\r|\n)', re.MULTILINE)
def StandardizeHtml(text):
  '''Standardizes the newline format and png mime type in Html text.'''
  unified_newlines = _NEW_LINE.sub('\n', text)
  return unified_newlines.replace('data:image/x-png;', 'data:image/png;')
class ChromeHtmlUnittest(unittest.TestCase):
'''Unit tests for ChromeHtml.'''
def testFileResources(self):
'''Tests inlined image file resources with available high DPI assets.'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
''',
'test.css': '''
.image {
background: url('test.png');
}
''',
'test.png': 'PNG DATA',
'1.4x/test.png': '1.4x PNG DATA',
'1.8x/test.png': '1.8x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '1.4x,1.8x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<head>
<style>
.image {
background: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x, url('data:image/png;base64,MS40eCBQTkcgREFUQQ==') 1.4x, url('data:image/png;base64,MS44eCBQTkcgREFUQQ==') 1.8x);
}
</style>
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
'''))
tmp_dir.CleanUp()
def testFileResourcesImageTag(self):
'''Tests inlined image file resources with available high DPI assets on
an image tag.'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<body>
<img id="foo" src="test.png">
</body>
</html>
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<body>
<img id="foo" src="data:image/png;base64,UE5HIERBVEE=" style="content: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x, url('data:image/png;base64,MnggUE5HIERBVEE=') 2x);">
</body>
</html>
'''))
tmp_dir.CleanUp()
def testFileResourcesNoFlatten(self):
'''Tests non-inlined image file resources with available high DPI assets.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url('test.png');
}
''',
'test.png': 'PNG DATA',
'1.4x/test.png': '1.4x PNG DATA',
'1.8x/test.png': '1.8x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '1.4x,1.8x'})
html.SetAttributes({'flattenhtml': 'false'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url('test.png') 1x, url('1.4x/test.png') 1.4x, url('1.8x/test.png') 1.8x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesNoFlattenSubdir(self):
'''Tests non-inlined image file resources w/high DPI assets in subdirs.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url('sub/test.png');
}
''',
'sub/test.png': 'PNG DATA',
'sub/1.4x/test.png': '1.4x PNG DATA',
'sub/1.8x/test.png': '1.8x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '1.4x,1.8x'})
html.SetAttributes({'flattenhtml': 'false'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url('sub/test.png') 1x, url('sub/1.4x/test.png') 1.4x, url('sub/1.8x/test.png') 1.8x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesPreprocess(self):
'''Tests preprocessed image file resources with available high DPI
assets.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url('test.png');
}
''',
'test.png': 'PNG DATA',
'1.4x/test.png': '1.4x PNG DATA',
'1.8x/test.png': '1.8x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '1.4x,1.8x'})
html.SetAttributes({'flattenhtml': 'false', 'preprocess': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url('test.png') 1x, url('1.4x/test.png') 1.4x, url('1.8x/test.png') 1.8x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesDoubleQuotes(self):
'''Tests inlined image file resources if url() filename is double quoted.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url("test.png");
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url("data:image/png;base64,UE5HIERBVEE=") 1x, url("data:image/png;base64,MnggUE5HIERBVEE=") 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesNoQuotes(self):
'''Tests inlined image file resources when url() filename is unquoted.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url(test.png);
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesSubdirs(self):
'''Tests inlined image file resources if url() filename is in a subdir.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url('some/sub/path/test.png');
}
''',
'some/sub/path/test.png': 'PNG DATA',
'some/sub/path/2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x, url('data:image/png;base64,MnggUE5HIERBVEE=') 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesNoFile(self):
'''Tests inlined image file resources without available high DPI assets.'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
''',
'test.css': '''
.image {
background: url('test.png');
}
''',
'test.png': 'PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<head>
<style>
.image {
background: url('data:image/png;base64,UE5HIERBVEE=');
}
</style>
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
'''))
tmp_dir.CleanUp()
def testFileResourcesMultipleBackgrounds(self):
'''Tests inlined image file resources with two url()s.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url(test.png), url(test.png);
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x), -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesMultipleBackgroundsWithNewline1(self):
'''Tests inlined image file resources with line break after first url().'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background: url(test.png),
url(test.png);
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x),
-webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesMultipleBackgroundsWithNewline2(self):
'''Tests inlined image file resources with line break before first url()
and before second url().'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background:
url(test.png),
url(test.png);
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x),
-webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x);
}
'''))
tmp_dir.CleanUp()
def testFileResourcesCRLF(self):
'''Tests inlined image file resource when url() is preceded by a Windows
style line break.'''
tmp_dir = util.TempDir({
'test.css': '''
.image {
background:\r\nurl(test.png);
}
''',
'test.png': 'PNG DATA',
'2x/test.png': '2x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('test.css'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
.image {
background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x);
}
'''))
tmp_dir.CleanUp()
def testThemeResources(self):
'''Tests inserting high DPI chrome://theme references.'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
''',
'test.css': '''
.image {
background: url('chrome://theme/IDR_RESOURCE_NAME');
content: url('chrome://theme/IDR_RESOURCE_NAME_WITH_Q?$1');
}
''',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '2x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<head>
<style>
.image {
background: -webkit-image-set(url('chrome://theme/IDR_RESOURCE_NAME') 1x, url('chrome://theme/IDR_RESOURCE_NAME@2x') 2x);
content: -webkit-image-set(url('chrome://theme/IDR_RESOURCE_NAME_WITH_Q?$1') 1x, url('chrome://theme/IDR_RESOURCE_NAME_WITH_Q@2x?$1') 2x);
}
</style>
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
'''))
tmp_dir.CleanUp()
def testRemoveUnsupportedScale(self):
'''Tests removing an unsupported scale factor from an explicit image-set.'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
''',
'test.css': '''
.image {
background: -webkit-image-set(url('test.png') 1x,
url('test1.4.png') 1.4x,
url('test1.8.png') 1.8x);
}
''',
'test.png': 'PNG DATA',
'test1.4.png': '1.4x PNG DATA',
'test1.8.png': '1.8x PNG DATA',
})
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '1.8x'})
html.SetAttributes({'flattenhtml': 'true'})
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<head>
<style>
.image {
background: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x,
url('data:image/png;base64,MS44eCBQTkcgREFUQQ==') 1.8x);
}
</style>
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
'''))
tmp_dir.CleanUp()
def testExpandVariablesInFilename(self):
'''
Tests variable substitution in filenames while flattening images
with multiple scale factors.
'''
tmp_dir = util.TempDir({
'index.html': '''
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" href="test.css">
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
''',
'test.css': '''
.image {
background: url('test[WHICH].png');
}
''',
'test1.png': 'PNG DATA',
'1.4x/test1.png': '1.4x PNG DATA',
'1.8x/test1.png': '1.8x PNG DATA',
})
def replacer(var, repl):
return lambda filename: filename.replace('[%s]' % var, repl)
html = chrome_html.ChromeHtml(tmp_dir.GetPath('index.html'))
html.SetDefines({'scale_factors': '1.4x,1.8x'})
html.SetAttributes({'flattenhtml': 'true'})
html.SetFilenameExpansionFunction(replacer('WHICH', '1'));
html.Parse()
self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')),
StandardizeHtml('''
<!DOCTYPE HTML>
<html>
<head>
<style>
.image {
background: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x, url('data:image/png;base64,MS40eCBQTkcgREFUQQ==') 1.4x, url('data:image/png;base64,MS44eCBQTkcgREFUQQ==') 1.8x);
}
</style>
</head>
<body>
<!-- Don't need a body. -->
</body>
</html>
'''))
tmp_dir.CleanUp()
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

Просмотреть файл

@ -0,0 +1,157 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Gatherer for <structure type="chrome_scaled_image">.
'''
from __future__ import print_function
import os
import struct
from grit import exception
from grit import lazy_re
from grit import util
from grit.gather import interface
_PNG_SCALE_CHUNK = b'\0\0\0\0csCl\xc1\x30\x60\x4d'
def _RescaleImage(data, from_scale, to_scale):
if from_scale != to_scale:
assert from_scale == 100
# Rather than rescaling the image we add a custom chunk directing Chrome to
# rescale it on load. Just append it to the PNG data since
# _MoveSpecialChunksToFront will move it later anyway.
data += _PNG_SCALE_CHUNK
return data
_PNG_MAGIC = b'\x89PNG\r\n\x1a\n'
'''Mandatory first chunk in order for the png to be valid.'''
_FIRST_CHUNK = b'IHDR'
'''Special chunks to move immediately after the IHDR chunk. (so that the PNG
remains valid.)
'''
_SPECIAL_CHUNKS = frozenset(b'csCl npTc'.split())
'''Any ancillary chunk not in this list is deleted from the PNG.'''
_ANCILLARY_CHUNKS_TO_LEAVE = frozenset(
b'bKGD cHRM gAMA iCCP pHYs sBIT sRGB tRNS acTL fcTL fdAT'.split())
def _MoveSpecialChunksToFront(data):
'''Move special chunks immediately after the IHDR chunk (so that the PNG
remains valid). Also delete ancillary chunks that are not on our whitelist.
'''
first = [_PNG_MAGIC]
special_chunks = []
rest = []
for chunk in _ChunkifyPNG(data):
type = chunk[4:8]
critical = type < b'a'
if type == _FIRST_CHUNK:
first.append(chunk)
elif type in _SPECIAL_CHUNKS:
special_chunks.append(chunk)
elif critical or type in _ANCILLARY_CHUNKS_TO_LEAVE:
rest.append(chunk)
return b''.join(first + special_chunks + rest)
def _ChunkifyPNG(data):
'''Given a PNG image, yield its chunks in order.'''
assert data.startswith(_PNG_MAGIC)
pos = 8
while pos != len(data):
length = 12 + struct.unpack_from('>I', data, pos)[0]
assert 12 <= length <= len(data) - pos
yield data[pos:pos+length]
pos += length
def _MakeBraceGlob(strings):
'''Given ['foo', 'bar'], return '{foo,bar}', for error reporting.
'''
if len(strings) == 1:
return strings[0]
else:
return '{' + ','.join(strings) + '}'
class ChromeScaledImage(interface.GathererBase):
  '''Represents an image that exists in multiple layout variants
  (e.g. "default", "touch") and multiple scale variants
  (e.g. "100_percent", "200_percent").
  '''

  split_context_re_ = lazy_re.compile(r'(.+)_(\d+)_percent\Z')

  def _FindInputFile(self):
    '''Locates the image variant for the current output context.

    Searches the requested layout/scale first, then (when allowed) the
    'default' layout and the 100% scale.

    Returns:
      (path, scale, req_scale): the grd-relative path of the file found
      (None when nothing matched and default-layout fallback is disabled),
      the scale of the file actually found, and the scale requested by the
      output context.

    Raises:
      exception.MissingMandatoryAttribute: the <output> node has no usable
        context attribute.
      exception.FileNotFound: no variant exists although default-layout
        fallback was allowed.
    '''
    output_context = self.grd_node.GetRoot().output_context
    match = self.split_context_re_.match(output_context)
    if not match:
      raise exception.MissingMandatoryAttribute(
          'All <output> nodes must have an appropriate context attribute'
          ' (e.g. context="touch_200_percent")')
    req_layout, req_scale = match.group(1), int(match.group(2))

    layouts = [req_layout]
    try_default_layout = self.grd_node.GetRoot().fallback_to_default_layout
    if try_default_layout and 'default' not in layouts:
      layouts.append('default')

    scales = [req_scale]
    try_low_res = self.grd_node.FindBooleanAttribute(
        'fallback_to_low_resolution', default=False, skip_self=False)
    if try_low_res and 100 not in scales:
      scales.append(100)

    for layout in layouts:
      for scale in scales:
        dir = '%s_%s_percent' % (layout, scale)
        path = os.path.join(dir, self.rc_file)
        if os.path.exists(self.grd_node.ToRealPath(path)):
          return path, scale, req_scale

    if not try_default_layout:
      # The file was not found in the specified output context and it was
      # explicitly indicated that the default context should not be searched
      # as a fallback, so return an empty path.
      return None, 100, req_scale

    # The file was found in neither the specified context nor the default
    # context, so raise an exception.
    dir = "%s_%s_percent" % (_MakeBraceGlob(layouts),
                             _MakeBraceGlob([str(x) for x in scales]))
    raise exception.FileNotFound(
        'Tried ' + self.grd_node.ToRealPath(os.path.join(dir, self.rc_file)))

  def GetInputPath(self):
    path, scale, req_scale = self._FindInputFile()
    return path

  def Parse(self):
    # Nothing to do up front: the file is located and read lazily in
    # GetData()/GetInputPath().
    pass

  def GetTextualIds(self):
    return [self.extkey]

  def GetData(self, lang, encoding):
    '''Returns the (possibly rescale-tagged) PNG bytes, or None if the file
    was not found and default-layout fallback is disabled.'''
    assert encoding == util.BINARY
    path, scale, req_scale = self._FindInputFile()
    if path is None:
      return None
    data = util.ReadFile(self.grd_node.ToRealPath(path), util.BINARY)
    data = _RescaleImage(data, scale, req_scale)
    data = _MoveSpecialChunksToFront(data)
    return data

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Images carry no translateable text; the "translation" is simply the
    binary image data for the requested language/scale.

    BUG FIX: this previously called self.GetData() without its two required
    positional arguments (lang, encoding), which raised TypeError whenever
    Translate() was invoked. The signature now matches the GathererBase
    interface and forwards correctly.
    '''
    return self.GetData(lang, util.BINARY)

Просмотреть файл

@ -0,0 +1,209 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for ChromeScaledImage.'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../..')))
import re
import struct
import unittest
import zlib
from grit import exception
from grit import util
from grit.format import data_pack
from grit.tool import build
# (file-name suffix, grit output type) pairs: every output format that these
# tests exercise the builder against.
_OUTFILETYPES = [
  ('.h', 'rc_header'),
  ('_map.cc', 'resource_map_source'),
  ('_map.h', 'resource_map_header'),
  ('.pak', 'data_package'),
  ('.rc', 'rc_all'),
]
_PNG_HEADER = (
b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52'
b'\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90\x77\x53'
b'\xde')
_PNG_FOOTER = (
b'\x00\x00\x00\x0c\x49\x44\x41\x54\x18\x57\x63\xf8\xff\xff\x3f\x00'
b'\x05\xfe\x02\xfe\xa7\x35\x81\x84\x00\x00\x00\x00\x49\x45\x4e\x44'
b'\xae\x42\x60\x82')
def _MakePNG(chunks):
# Python 3 changed the return value of zlib.crc32 to an unsigned int.
format = 'i' if sys.version_info.major < 3 else 'I'
pack_int32 = struct.Struct('>' + format).pack
chunks = [pack_int32(len(payload)) + type + payload +
pack_int32(zlib.crc32(type + payload))
for type, payload in chunks]
return _PNG_HEADER + b''.join(chunks) + _PNG_FOOTER
def _GetFilesInPak(pakname):
  '''Returns the set of file contents actually included in the .pak output.'''
  pak = data_pack.ReadDataPack(pakname)
  return set(pak.resources.values())
def _GetFilesInRc(rcname, tmp_dir, contents):
  '''Returns the set of file contents actually included in the .rc output.'''
  rc_text = util.ReadFile(rcname, util.BINARY).decode('utf-16')
  # Map absolute on-disk paths back to the original file contents.
  by_path = dict((tmp_dir.GetPath(k), v) for k, v in contents.items())
  matches = re.finditer(r'(?m)^\w+\s+BINDATA\s+"([^"]+)"$', rc_text)
  return set(by_path[os.path.normpath(m.group(1))] for m in matches)
def _MakeFallbackAttr(fallback):
if fallback is None:
return ''
else:
return ' fallback_to_low_resolution="%s"' % ('false', 'true')[fallback]
def _Structures(fallback, *body):
return '<structures%s>\n%s\n</structures>' % (
_MakeFallbackAttr(fallback), '\n'.join(body))
def _Structure(name, file, fallback=None):
return '<structure name="%s" file="%s" type="chrome_scaled_image"%s />' % (
name, file, _MakeFallbackAttr(fallback))
def _If(expr, *body):
return '<if expr="%s">\n%s\n</if>' % (expr, '\n'.join(body))
def _RunBuildTest(self, structures, inputs, expected_outputs, skip_rc=False,
                  layout_fallback=''):
  '''Runs the grd builder end-to-end over a synthesized .grd file and checks
  which image contents appear in each per-context output.

  Args:
    self: The calling TestCase; assertions are raised through it.
    structures: XML text for the <structures> element.
    inputs: Mapping of grd-relative input path -> file contents.
    expected_outputs: Mapping of output context -> set of the file contents
        expected in that context's outputs; outputs are generated for every
        context listed here.
    skip_rc: If true, only the .pak outputs are checked.
    layout_fallback: Extra attribute text for the <output> nodes, e.g.
        ' fallback_to_default_layout="false"'.
  '''
  outputs = '\n'.join('<output filename="out/%s%s" type="%s" context="%s"%s />'
                          % (context, ext, type, context, layout_fallback)
                      for ext, type in _OUTFILETYPES
                      for context in expected_outputs)
  infiles = {
    'in/in.grd': ('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="0" current_release="1">
        <outputs>
          %s
        </outputs>
        <release seq="1">
          %s
        </release>
      </grit>
      ''' % (outputs, structures)).encode('utf-8'),
  }
  for pngpath, pngdata in inputs.items():
    normpath = os.path.normpath('in/' + pngpath)
    infiles[normpath] = pngdata
  class Options(object):
    pass
  with util.TempDir(infiles, mode='wb') as tmp_dir:
    with tmp_dir.AsCurrentDir():
      options = Options()
      options.input = tmp_dir.GetPath('in/in.grd')
      options.verbose = False
      options.extra_verbose = False
      build.RcBuilder().Run(options, [])
    for context, expected_data in expected_outputs.items():
      # assertEquals is a deprecated unittest alias that was removed in
      # Python 3.12; assertEqual is the supported spelling.
      self.assertEqual(expected_data,
                       _GetFilesInPak(tmp_dir.GetPath('out/%s.pak' % context)))
      if not skip_rc:
        self.assertEqual(expected_data,
                         _GetFilesInRc(tmp_dir.GetPath('out/%s.rc' % context),
                                       tmp_dir, infiles))
class ChromeScaledImageUnittest(unittest.TestCase):
  # End-to-end tests for the chrome_scaled_image gatherer: each test runs a
  # full grd build via _RunBuildTest and checks which image bytes land in
  # each per-context output file.

  def testNormalFallback(self):
    # 'tactile' provides its own a.png; b.png is missing from the tactile
    # layout, so it must fall back to the default layout's copy.
    d123a = _MakePNG([(b'AbCd', b'')])
    t123a = _MakePNG([(b'EfGh', b'')])
    d123b = _MakePNG([(b'IjKl', b'')])
    _RunBuildTest(self,
        _Structures(None,
            _Structure('IDR_A', 'a.png'),
            _Structure('IDR_B', 'b.png'),
        ),
        {'default_123_percent/a.png': d123a,
         'tactile_123_percent/a.png': t123a,
         'default_123_percent/b.png': d123b,
        },
        {'default_123_percent': set([d123a, d123b]),
         'tactile_123_percent': set([t123a, d123b]),
        })

  def testNormalFallbackFailure(self):
    # Only 100% variants exist; the requested 123% scale has no file and
    # low-resolution fallback is off, so the build must fail.
    self.assertRaises(
        exception.FileNotFound, _RunBuildTest, self,
        _Structures(
            None,
            _Structure('IDR_A', 'a.png'),
        ), {
            'default_100_percent/a.png': _MakePNG([(b'AbCd', b'')]),
            'tactile_100_percent/a.png': _MakePNG([(b'EfGh', b'')]),
        }, {'tactile_123_percent': 'should fail before using this'})

  def testLowresFallback(self):
    # The per-structure attribute (inner) overrides the <structures>-level
    # attribute (outer); fallback adds the rescaling 'csCl' chunk.
    png = _MakePNG([(b'Abcd', b'')])
    png_with_csCl = _MakePNG([(b'csCl', b''), (b'Abcd', b'')])
    for outer in (None, False, True):
      for inner in (None, False, True):
        args = (
            self,
            _Structures(outer,
                _Structure('IDR_A', 'a.png', inner),
            ),
            {'default_100_percent/a.png': png},
            {'tactile_200_percent': set([png_with_csCl])})
        if inner or (inner is None and outer):
          # should fall back to 100%
          _RunBuildTest(*args, skip_rc=True)
        else:
          # shouldn't fall back
          self.assertRaises(exception.FileNotFound, _RunBuildTest, *args)

    # Test fallback failure with fallback_to_low_resolution=True
    self.assertRaises(exception.FileNotFound,
        _RunBuildTest, self,
        _Structures(True,
            _Structure('IDR_A', 'a.png'),
        ),
        {},  # no files
        {'tactile_123_percent': 'should fail before using this'})

  def testNoFallbackToDefaultLayout(self):
    # With fallback_to_default_layout="false", the tactile context only
    # contains its own files; b.png is simply absent from it.
    d123a = _MakePNG([(b'AbCd', b'')])
    t123a = _MakePNG([(b'EfGh', b'')])
    d123b = _MakePNG([(b'IjKl', b'')])
    _RunBuildTest(self,
        _Structures(None,
            _Structure('IDR_A', 'a.png'),
            _Structure('IDR_B', 'b.png'),
        ),
        {'default_123_percent/a.png': d123a,
         'tactile_123_percent/a.png': t123a,
         'default_123_percent/b.png': d123b,
        },
        {'default_123_percent': set([d123a, d123b]),
         'tactile_123_percent': set([t123a]),
        },
        layout_fallback=' fallback_to_default_layout="false"')
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
  unittest.main()

172
third_party/libwebrtc/tools/grit/grit/gather/interface.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,172 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Interface for all gatherers.
'''
from __future__ import print_function
import os.path
import six
from grit import clique
from grit import util
class GathererBase(object):
  '''Interface for all gatherer implementations. Subclasses must implement
  all methods that raise NotImplemented.'''

  def __init__(self, rc_file, extkey=None, encoding='cp1252', is_skeleton=False):
    '''Initializes the gatherer object's attributes, but does not attempt to
    read the input file.

    Args:
      rc_file: The 'file' attribute of the <structure> node (usually the
          relative path to the source file), or a file-like object to read
          the input from directly (see _LoadInputFile).
      extkey: e.g. 'ID_MY_DIALOG'
      encoding: e.g. 'utf-8'
      is_skeleton: Indicates whether this gatherer is a skeleton gatherer, in
          which case we should not do some types of processing on the
          translateable bits.
    '''
    self.rc_file = rc_file
    self.extkey = extkey
    self.encoding = encoding
    # A default uberclique that is local to this object. Users can override
    # this with the uberclique they are using.
    self.uberclique = clique.UberClique()
    # Indicates whether this gatherer is a skeleton gatherer, in which case
    # we should not do some types of processing on the translateable bits.
    self.is_skeleton = is_skeleton
    # Stores the grd node on which this gatherer is running. This allows
    # evaluating expressions.
    self.grd_node = None

  def SetAttributes(self, attrs):
    '''Sets node attributes used by the gatherer.

    By default, this does nothing. If special handling is desired, it should
    be overridden by the child gatherer.

    Args:
      attrs: The mapping of node attributes.
    '''
    pass

  def SetDefines(self, defines):
    '''Sets global defines used by the gatherer.

    By default, this does nothing. If special handling is desired, it should
    be overridden by the child gatherer.

    Args:
      defines: The mapping of define values.
    '''
    pass

  def SetGrdNode(self, node):
    '''Sets the grd node on which this gatherer is running.
    '''
    self.grd_node = node

  def SetUberClique(self, uberclique):
    '''Overrides the default uberclique so that cliques created by this object
    become part of the uberclique supplied by the user.
    '''
    self.uberclique = uberclique

  def Parse(self):
    '''Reads and parses the contents of what is being gathered.'''
    raise NotImplementedError()

  def GetData(self, lang, encoding):
    '''Returns the data to be added to the DataPack for this node or None if
    this node does not add a DataPack entry.
    '''
    return None

  def GetText(self):
    '''Returns the text of what is being gathered.'''
    raise NotImplementedError()

  def GetTextualIds(self):
    '''Returns the mnemonic IDs that need to be defined for the resource
    being gathered to compile correctly.'''
    return []

  def GetCliques(self):
    '''Returns the MessageClique objects for all translateable portions.'''
    return []

  def GetInputPath(self):
    '''Returns the relative path of the input file, as given to __init__.'''
    return self.rc_file

  def GetHtmlResourceFilenames(self):
    """Returns a set of all filenames inlined by this gatherer."""
    return []

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns the resource being gathered, with translateable portions filled
    with the translation for language 'lang'.

    If pseudo_if_not_available is true, a pseudotranslation will be used for
    any message that doesn't have a real translation available.

    If no translation is available and pseudo_if_not_available is false,
    fallback_to_english controls the behavior. If it is false, throw an
    error. If it is true, use the English version of the message as its own
    "translation".

    If skeleton_gatherer is specified, the translation will use the
    nontranslateable parts from the gatherer 'skeleton_gatherer', which must
    be of the same type as 'self'.

    Args:
      lang: 'en'
      pseudo_if_not_available: True | False
      skeleton_gatherer: other_gatherer
      fallback_to_english: True | False

    Return:
      e.g. 'ID_THIS_SECTION TYPE\n...BEGIN\n  "Translated message"\n......\nEND'

    Raises:
      grit.exception.NotReady() if used before Parse() has been successfully
      called.
      grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' and
      fallback_to_english are both false and there is no translation for the
      requested language.
    '''
    raise NotImplementedError()

  def SubstituteMessages(self, substituter):
    '''Applies substitutions to all messages in the gatherer.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    pass

  def SetFilenameExpansionFunction(self, fn):
    '''Sets a function for rewriting filenames before gathering.'''
    pass

  # TODO(benrg): Move this elsewhere, since it isn't part of the interface.
  def _LoadInputFile(self):
    '''A convenience function for subclasses that loads the contents of the
    input file.

    If rc_file is a string it is treated as a path (resolved against the grd
    node unless already absolute); otherwise it is assumed to be a file-like
    object and read directly.
    '''
    if isinstance(self.rc_file, six.string_types):
      path = self.GetInputPath()
      # Hack: some unit tests supply an absolute path and no root node.
      if not os.path.isabs(path):
        path = self.grd_node.ToRealPath(path)
      return util.ReadFile(path, self.encoding)
    else:
      return self.rc_file.read()

27
third_party/libwebrtc/tools/grit/grit/gather/json_loader.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,27 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from grit.gather import interface
class JsonLoader(interface.GathererBase):
  '''A simple gatherer that loads and parses a JSON file.'''

  def Parse(self):
    '''Reads and parses the text of self._json_text into the data structure in
    self._data.
    '''
    self._json_text = self._LoadInputFile()
    self._data = None

    globs = {}
    # NOTE(review): despite the name, the input is evaluated as Python
    # source rather than parsed with the json module, so files may use
    # Python literal syntax (single quotes, True/False, trailing commas).
    # SECURITY: exec() runs arbitrary code -- the input must be trusted
    # build-tree data, never user-supplied content.
    exec('data = ' + self._json_text, globs)
    self._data = globs['data']

  def GetData(self, lang, encoding):
    '''Returns the parsed JSON data.'''
    assert encoding == 'utf-8'
    return self._data

325
third_party/libwebrtc/tools/grit/grit/gather/policy_json.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,325 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Support for "policy_templates.json" format used by the policy template
generator as a source for generating ADM,ADMX,etc files.'''
from __future__ import print_function
import json
import sys
import six
from grit.gather import skeleton_gatherer
from grit import util
from grit import tclib
from xml.dom import minidom
from xml.parsers.expat import ExpatError
class PolicyJson(skeleton_gatherer.SkeletonGatherer):
  '''Collects and translates the following strings from policy_templates.json:
    - captions, descriptions, labels and Android app support details of
      policies
    - captions of enumeration items
    - misc strings from the 'messages' section
  Translatable strings may have untranslateable placeholders with the same
  format that is used in .grd files.
  '''

  def _AddEndline(self, add_comma):
    '''Adds an endline to the skeleton tree. If add_comma is true, adds a
    comma before the endline.

    Args:
      add_comma: A boolean to add a comma or not.
    '''
    self._AddNontranslateableChunk(',\n' if add_comma else '\n')

  def _ParsePlaceholder(self, placeholder, msg):
    '''Extracts a placeholder from a DOM node and adds it to a tclib Message.

    Args:
      placeholder: A DOM node of the form:
        <ph name="PLACEHOLDER_NAME">Placeholder text<ex>Example value</ex></ph>
      msg: The placeholder is added to this message.
    '''
    text = []
    example_text = []
    for node1 in placeholder.childNodes:
      if (node1.nodeType == minidom.Node.TEXT_NODE):
        text.append(node1.data)
      elif (node1.nodeType == minidom.Node.ELEMENT_NODE and
            node1.tagName == 'ex'):
        for node2 in node1.childNodes:
          example_text.append(node2.toxml())
      else:
        # BUG FIX: this message previously referenced node2, which is unbound
        # when the first unexpected child is hit (raising NameError instead
        # of the intended diagnostic).
        raise Exception('Unexpected element inside a placeholder: ' +
                        node1.toxml())
    if example_text == []:
      # In such cases the original text is okay for an example.
      example_text = text

    replaced_text = self.Escape(''.join(text).strip())
    # $1..$3 are build-dependent product names supplied by SetDefines().
    replaced_text = replaced_text.replace('$1', self._config['app_name'])
    replaced_text = replaced_text.replace('$2', self._config['os_name'])
    replaced_text = replaced_text.replace('$3', self._config['frame_name'])

    msg.AppendPlaceholder(tclib.Placeholder(
        placeholder.attributes['name'].value,
        replaced_text,
        ''.join(example_text).strip()))

  def _ParseMessage(self, string, desc):
    '''Parses a given string and adds it to the output as a translatable chunk
    with a given description.

    Args:
      string: The message string to parse.
      desc: The description of the message (for the translators).
    '''
    msg = tclib.Message(description=desc)
    xml = '<msg>' + string + '</msg>'
    try:
      node = minidom.parseString(xml).childNodes[0]
    except ExpatError:
      reason = '''Input isn't valid XML (has < & > been escaped?): ''' + string
      six.reraise(Exception, reason, sys.exc_info()[2])

    for child in node.childNodes:
      if child.nodeType == minidom.Node.TEXT_NODE:
        msg.AppendText(child.data)
      elif child.nodeType == minidom.Node.ELEMENT_NODE:
        if child.tagName == 'ph':
          self._ParsePlaceholder(child, msg)
        else:
          raise Exception("Not implemented.")
      else:
        raise Exception("Not implemented.")

    self.skeleton_.append(self.uberclique.MakeClique(msg))

  def _ParseNode(self, node):
    '''Traverses the subtree of a DOM node, and registers a tclib message for
    all the <message> nodes.
    '''
    att_text = []
    if node.attributes:
      for key, value in sorted(node.attributes.items()):
        att_text.append(' %s=\"%s\"' % (key, value))
    self._AddNontranslateableChunk("<%s%s>" %
                                   (node.tagName, ''.join(att_text)))
    if node.tagName == 'message':
      msg = tclib.Message(description=node.attributes['desc'])
      for child in node.childNodes:
        if child.nodeType == minidom.Node.TEXT_NODE:
          if msg is None:
            self._AddNontranslateableChunk(child.data)
          else:
            msg.AppendText(child.data)
        elif child.nodeType == minidom.Node.ELEMENT_NODE:
          if child.tagName == 'ph':
            self._ParsePlaceholder(child, msg)
          else:
            assert False
      self.skeleton_.append(self.uberclique.MakeClique(msg))
    else:
      for child in node.childNodes:
        if child.nodeType == minidom.Node.TEXT_NODE:
          self._AddNontranslateableChunk(child.data)
        # BUG FIX: this previously tested node.nodeType (always an element
        # here), so non-element children such as comments were recursed into
        # and crashed on the tagName access.
        elif child.nodeType == minidom.Node.ELEMENT_NODE:
          self._ParseNode(child)
    self._AddNontranslateableChunk("</%s>" % node.tagName)

  def _AddIndentedNontranslateableChunk(self, depth, string):
    '''Adds a nontranslateable chunk of text to the internally stored output.

    Args:
      depth: The number of double spaces to prepend to the next argument
          string.
      string: The chunk of text to add.
    '''
    result = []
    while depth > 0:
      result.append('  ')
      depth = depth - 1
    result.append(string)
    self._AddNontranslateableChunk(''.join(result))

  def _GetDescription(self, item, item_type, parent_item, key):
    '''Creates a description for a translatable message. The description gives
    some context for the person who will translate this message.

    Args:
      item: A policy or an enumeration item.
      item_type: 'enum_item' | 'policy'
      parent_item: The owner of item. (A policy of type group or enum.)
      key: The name of the key to parse.
    '''
    key_map = {
      'desc': 'Description',
      'caption': 'Caption',
      'label': 'Label',
      'arc_support': 'Information about the effect on Android apps'
    }
    # BUG FIX: when no owners were listed, the original code joined the
    # *string* 'unknown', producing 'u,n,k,n,o,w,n' in the description.
    if item_type == 'policy':
      owners = ','.join(item['owners']) if 'owners' in item else 'unknown'
      return ('%s of the policy named %s [owner(s): %s]' %
              (key_map[key], item['name'], owners))
    if item_type == 'enum_item':
      owners = (','.join(parent_item['owners'])
                if 'owners' in parent_item else 'unknown')
      return ('%s of the option named %s in policy %s [owner(s): %s]' %
              (key_map[key], item['name'], parent_item['name'], owners))
    raise Exception('Unexpected type %s' % item_type)

  def _AddSchemaKeys(self, obj, depth):
    '''Recursively emits a JSON schema object, marking 'description' string
    values as translatable messages.'''
    obj_type = type(obj)
    if obj_type == dict:
      self._AddNontranslateableChunk('{\n')
      keys = sorted(obj.keys())
      for count, (key) in enumerate(keys, 1):
        json_key = "%s: " % json.dumps(key)
        self._AddIndentedNontranslateableChunk(depth + 1, json_key)
        if key == 'description' and type(obj[key]) == str:
          self._AddNontranslateableChunk("\"")
          self._ParseMessage(obj[key], 'Description of schema property')
          self._AddNontranslateableChunk("\"")
        elif type(obj[key]) in (bool, int, str):
          self._AddSchemaKeys(obj[key], 0)
        else:
          self._AddSchemaKeys(obj[key], depth + 1)
        self._AddEndline(count < len(keys))
      self._AddIndentedNontranslateableChunk(depth, '}')
    elif obj_type == list:
      self._AddNontranslateableChunk('[\n')
      for count, (item) in enumerate(obj, 1):
        self._AddSchemaKeys(item, depth + 1)
        self._AddEndline(count < len(obj))
      self._AddIndentedNontranslateableChunk(depth, ']')
    elif obj_type in (bool, int, str):
      self._AddIndentedNontranslateableChunk(depth, json.dumps(obj))
    else:
      raise Exception('Invalid schema object: %s' % obj)

  def _AddPolicyKey(self, item, item_type, parent_item, key, depth):
    '''Given a policy/enumeration item and a key, adds that key and its value
    into the output.
    E.g.:
      'example_value': 123
    If key indicates that the value is a translatable string, then it is
    parsed as a translatable string.

    Args:
      item: A policy or an enumeration item.
      item_type: 'enum_item' | 'policy'
      parent_item: The owner of item. (A policy of type group or enum.)
      key: The name of the key to parse.
      depth: The level of indentation.
    '''
    self._AddIndentedNontranslateableChunk(depth, "%s: " % json.dumps(key))
    if key in ('desc', 'caption', 'label', 'arc_support'):
      self._AddNontranslateableChunk("\"")
      self._ParseMessage(
          item[key],
          self._GetDescription(item, item_type, parent_item, key))
      self._AddNontranslateableChunk("\"")
    elif key in ('schema', 'validation_schema', 'description_schema'):
      self._AddSchemaKeys(item[key], depth)
    else:
      self._AddNontranslateableChunk(json.dumps(item[key], ensure_ascii=False))

  def _AddItems(self, items, item_type, parent_item, depth):
    '''Parses and adds a list of items from the JSON file. Items can be
    policies or parts of an enum policy.

    Args:
      items: Either a list of policies or a list of dictionaries.
      item_type: 'enum_item' | 'policy'
      parent_item: If items contains a list of policies, then this is the
          policy group that owns them. If items contains a list of
          enumeration items, then this is the enum policy that holds them.
      depth: Indicates the depth of our position in the JSON hierarchy. Used
          to add nice line-indent to the output.
    '''
    for item_count, (item1) in enumerate(items, 1):
      self._AddIndentedNontranslateableChunk(depth, "{\n")
      keys = sorted(item1.keys())
      for keys_count, (key) in enumerate(keys, 1):
        if key == 'items':
          self._AddIndentedNontranslateableChunk(depth + 1, "\"items\": [\n")
          self._AddItems(item1['items'], 'enum_item', item1, depth + 2)
          self._AddIndentedNontranslateableChunk(depth + 1, "]")
        elif key == 'policies' and all(not isinstance(x, str)
                                       for x in item1['policies']):
          self._AddIndentedNontranslateableChunk(depth + 1, "\"policies\": [\n")
          self._AddItems(item1['policies'], 'policy', item1, depth + 2)
          self._AddIndentedNontranslateableChunk(depth + 1, "]")
        else:
          self._AddPolicyKey(item1, item_type, parent_item, key, depth + 1)
        self._AddEndline(keys_count < len(keys))
      self._AddIndentedNontranslateableChunk(depth, "}")
      self._AddEndline(item_count < len(items))

  def _AddMessages(self):
    '''Processes and adds the 'messages' section to the output.'''
    self._AddNontranslateableChunk("  \"messages\": {\n")
    messages = self.data['messages'].items()
    for count, (name, message) in enumerate(messages, 1):
      self._AddNontranslateableChunk("    %s: {\n" % json.dumps(name))
      self._AddNontranslateableChunk("      \"text\": \"")
      self._ParseMessage(message['text'], message['desc'])
      self._AddNontranslateableChunk("\"\n")
      self._AddNontranslateableChunk("    }")
      self._AddEndline(count < len(self.data['messages']))
    self._AddNontranslateableChunk("  }\n")

  # Although we use the RegexpGatherer base class, we do not use the
  # _RegExpParse method of that class to implement Parse().  Instead, we
  # parse using a DOM parser.
  def Parse(self):
    if self.have_parsed_:
      return
    self.have_parsed_ = True

    self.text_ = self._LoadInputFile()
    if util.IsExtraVerbose():
      print(self.text_)

    # SECURITY: eval() runs arbitrary code; the input must be trusted
    # build-tree data (policy_templates.json), never user-supplied content.
    self.data = eval(self.text_)
    self._AddNontranslateableChunk('{\n')
    self._AddNontranslateableChunk("  \"policy_definitions\": [\n")
    self._AddItems(self.data['policy_definitions'], 'policy', None, 2)
    self._AddNontranslateableChunk("  ],\n")
    self._AddNontranslateableChunk("  \"policy_atomic_group_definitions\": [\n")
    if 'policy_atomic_group_definitions' in self.data:
      self._AddItems(self.data['policy_atomic_group_definitions'],
                     'policy', None, 2)
    self._AddNontranslateableChunk("  ],\n")
    self._AddMessages()
    self._AddNontranslateableChunk('\n}')

  def Escape(self, text):
    # Escape as a JSON string body (without the surrounding quotes).
    return json.dumps(text, ensure_ascii=False)[1:-1]

  def SetDefines(self, defines):
    '''Selects the product-name configuration ($1..$3 substitutions) based on
    the _chromium/_google_chrome build define.

    Raises:
      Exception: if defines is empty or names no known build.
    '''
    if not defines:
      raise Exception('Must pass valid defines')

    if '_chromium' in defines:
      self._config = {
        'build': 'chromium',
        'app_name': 'Chromium',
        'frame_name': 'Chromium Frame',
        'os_name': 'Chromium OS',
      }
    elif '_google_chrome' in defines:
      self._config = {
        'build': 'chrome',
        'app_name': 'Google Chrome',
        'frame_name': 'Google Chrome Frame',
        'os_name': 'Google Chrome OS',
      }
    else:
      raise Exception('Unknown build')

Просмотреть файл

@ -0,0 +1,347 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.policy_json'''
from __future__ import print_function
import json
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import policy_json
class PolicyJsonUnittest(unittest.TestCase):
  '''Tests for the policy_json.PolicyJson gatherer.

  Uses assertEqual/assertTrue instead of the failUnless/assertEquals
  aliases, which are deprecated and were removed in Python 3.12.
  '''

  def GetExpectedOutput(self, original):
    '''Returns the structure of *original* with the untranslated 'desc'
    field removed from every message, matching Translate() output.'''
    expected = eval(original)
    for key, message in expected['messages'].items():
      del message['desc']
    return expected

  def testEmpty(self):
    original = """{
      'policy_definitions': [],
      'policy_atomic_group_definitions': [],
      'messages': {}
    }"""
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 0)
    self.assertEqual(eval(original), json.loads(gatherer.Translate('en')))

  def testGeneralPolicy(self):
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'name': 'HomepageLocation',"
        "      'type': 'string',"
        "      'owners': ['foo@bar.com'],"
        "      'supported_on': ['chrome.*:8-'],"
        "      'features': {'dynamic_refresh': 1},"
        "      'example_value': 'http://chromium.org',"
        "      'caption': 'nothing special 1',"
        "      'desc': 'nothing special 2',"
        "      'label': 'nothing special 3',"
        "    },"
        "  ],"
        "  'policy_atomic_group_definitions': [],"
        "  'messages': {"
        "    'msg_identifier': {"
        "      'text': 'nothing special 3',"
        "      'desc': 'nothing special descr 3',"
        "    }"
        "  }"
        "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    # caption + desc + label + the message text.
    self.assertEqual(len(gatherer.GetCliques()), 4)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testEnum(self):
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'name': 'Policy1',"
        "      'owners': ['a@b'],"
        "      'items': ["
        "        {"
        "          'name': 'Item1',"
        "          'caption': 'nothing special',"
        "        }"
        "      ]"
        "    },"
        "  ],"
        "  'policy_atomic_group_definitions': [],"
        "  'messages': {}"
        "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testSchema(self):
    original = ("{"
                "  'policy_definitions': ["
                "    {"
                "      'name': 'Policy1',"
                "      'schema': {"
                "        'type': 'object',"
                "        'properties': {"
                "          'outer': {"
                "            'description': 'outer description',"
                "            'type': 'object',"
                "            'inner': {"
                "              'description': 'inner description',"
                "              'type': 'integer', 'minimum': 0, 'maximum': 100"
                "            },"
                "            'inner2': {"
                "              'description': 'inner2 description',"
                "              'type': 'integer',"
                "              'enum': [ 1, 2, 3 ],"
                "              'sensitiveValue': True"
                "            },"
                "          },"
                "        },"
                "      },"
                "      'caption': 'nothing special',"
                "      'owners': ['a@b']"
                "    },"
                "  ],"
                "  'policy_atomic_group_definitions': [],"
                "  'messages': {}"
                "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    # Three schema descriptions plus the caption.
    self.assertEqual(len(gatherer.GetCliques()), 4)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testValidationSchema(self):
    original = ("{"
                "  'policy_definitions': ["
                "    {"
                "      'name': 'Policy1',"
                "      'owners': ['a@b'],"
                "      'validation_schema': {"
                "        'type': 'object',"
                "        'properties': {"
                "          'description': 'properties description',"
                "          'type': 'object',"
                "        },"
                "      },"
                "    },"
                "  ],"
                "  'policy_atomic_group_definitions': [],"
                "  'messages': {}"
                "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testDescriptionSchema(self):
    original = ("{"
                "  'policy_definitions': ["
                "    {"
                "      'name': 'Policy1',"
                "      'owners': ['a@b'],"
                "      'description_schema': {"
                "        'type': 'object',"
                "        'properties': {"
                "          'description': 'properties description',"
                "          'type': 'object',"
                "        },"
                "      },"
                "    },"
                "  ],"
                "  'policy_atomic_group_definitions': [],"
                "  'messages': {}"
                "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  # Keeping for backwards compatibility.
  def testSubPolicyOldFormat(self):
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'type': 'group',"
        "      'policies': ["
        "        {"
        "          'name': 'Policy1',"
        "          'caption': 'nothing special',"
        "          'owners': ['a@b']"
        "        }"
        "      ]"
        "    }"
        "  ],"
        "  'policy_atomic_group_definitions': [],"
        "  'messages': {}"
        "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testSubPolicyNewFormat(self):
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'type': 'group',"
        "      'policies': ['Policy1']"
        "    },"
        "    {"
        "      'name': 'Policy1',"
        "      'caption': 'nothing special',"
        "      'owners': ['a@b']"
        "    }"
        "  ],"
        "  'policy_atomic_group_definitions': [],"
        "  'messages': {}"
        "}")
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testEscapingAndLineBreaks(self):
    original = """{
      'policy_definitions': [],
      'policy_atomic_group_definitions': [],
      'messages': {
        'msg1': {
          # The following line will contain two backslash characters when it
          # ends up in eval().
          'text': '''backslashes, Sir? \\\\''',
          'desc': ''
        },
        'msg2': {
          'text': '''quotes, Madam? "''',
          'desc': ''
        },
        'msg3': {
          # The following line will contain two backslash characters when it
          # ends up in eval().
          'text': 'backslashes, Sir? \\\\',
          'desc': ''
        },
        'msg4': {
          'text': "quotes, Madam? '",
          'desc': ''
        },
        'msg5': {
          'text': '''what happens
with a newline?''',
          'desc': ''
        },
        'msg6': {
          # The following line will contain a backslash+n when it ends up in
          # eval().
          'text': 'what happens\\nwith a newline? (Episode 1)',
          'desc': ''
        }
      }
    }"""
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 6)
    expected = self.GetExpectedOutput(original)
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))

  def testPlaceholdersChromium(self):
    original = """{
      "policy_definitions": [
        {
          "name": "Policy1",
          "caption": "Please install\\n<ph name=\\"PRODUCT_NAME\\">$1<ex>Google Chrome</ex></ph>.",
          "owners": "a@b"
        }
      ],
      "policy_atomic_group_definitions": [],
      "messages": {}
    }"""
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.SetDefines({'_chromium': True})
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = json.loads(re.sub('<ph.*ph>', 'Chromium', original))
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))
    self.assertTrue(gatherer.GetCliques()[0].translateable)
    msg = gatherer.GetCliques()[0].GetMessage()
    self.assertEqual(len(msg.GetPlaceholders()), 1)
    ph = msg.GetPlaceholders()[0]
    self.assertEqual(ph.GetOriginal(), 'Chromium')
    self.assertEqual(ph.GetPresentation(), 'PRODUCT_NAME')
    self.assertEqual(ph.GetExample(), 'Google Chrome')

  def testPlaceholdersChrome(self):
    original = """{
      "policy_definitions": [
        {
          "name": "Policy1",
          "caption": "Please install\\n<ph name=\\"PRODUCT_NAME\\">$1<ex>Google Chrome</ex></ph>.",
          "owners": "a@b"
        }
      ],
      "policy_atomic_group_definitions": [],
      "messages": {}
    }"""
    gatherer = policy_json.PolicyJson(StringIO(original))
    gatherer.SetDefines({'_google_chrome': True})
    gatherer.Parse()
    self.assertEqual(len(gatherer.GetCliques()), 1)
    expected = json.loads(re.sub('<ph.*ph>', 'Google Chrome', original))
    self.assertEqual(expected, json.loads(gatherer.Translate('en')))
    self.assertTrue(gatherer.GetCliques()[0].translateable)
    msg = gatherer.GetCliques()[0].GetMessage()
    self.assertEqual(len(msg.GetPlaceholders()), 1)
    ph = msg.GetPlaceholders()[0]
    self.assertEqual(ph.GetOriginal(), 'Google Chrome')
    self.assertEqual(ph.GetPresentation(), 'PRODUCT_NAME')
    self.assertEqual(ph.GetExample(), 'Google Chrome')

  def testGetDescription(self):
    gatherer = policy_json.PolicyJson({})
    gatherer.SetDefines({'_google_chrome': True})
    self.assertEqual(
        gatherer._GetDescription({'name': 'Policy1', 'owners': ['a@b']},
                                 'policy', None, 'desc'),
        'Description of the policy named Policy1 [owner(s): a@b]')
    self.assertEqual(
        gatherer._GetDescription({'name': 'Plcy2', 'owners': ['a@b', 'c@d']},
                                 'policy', None, 'caption'),
        'Caption of the policy named Plcy2 [owner(s): a@b,c@d]')
    self.assertEqual(
        gatherer._GetDescription({'name': 'Plcy3', 'owners': ['a@b']},
                                 'policy', None, 'label'),
        'Label of the policy named Plcy3 [owner(s): a@b]')
    self.assertEqual(
        gatherer._GetDescription({'name': 'Item'}, 'enum_item',
                                 {'name': 'Plcy', 'owners': ['a@b']}, 'caption'),
        'Caption of the option named Item in policy Plcy [owner(s): a@b]')
if __name__ == '__main__':
unittest.main()

343
third_party/libwebrtc/tools/grit/grit/gather/rc.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,343 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Support for gathering resources from RC files.
'''
from __future__ import print_function
import re
from grit import exception
from grit import lazy_re
from grit import tclib
from grit.gather import regexp
# Find portions that need unescaping in resource strings. We need to be
# careful that a \\n is matched _first_ as a \\ rather than matching as
# a \ followed by a \n.
# TODO(joi) Handle ampersands if we decide to change them into <ph>
# TODO(joi) May need to handle other control characters than \n
_NEED_UNESCAPE = lazy_re.compile(r'""|\\\\|\\n|\\t')

# Find portions that need escaping to encode string as a resource string.
_NEED_ESCAPE = lazy_re.compile(r'"|\n|\t|\\|\&nbsp\;')

# How to escape certain characters
_ESCAPE_CHARS = {
  '"' : '""',
  '\n' : '\\n',
  '\t' : '\\t',
  '\\' : '\\\\',
  '&nbsp;' : ' '
}

# How to unescape certain strings: the inverse mapping of _ESCAPE_CHARS.
_UNESCAPE_CHARS = {escaped: raw for raw, escaped in _ESCAPE_CHARS.items()}
class Section(regexp.RegexpGatherer):
  '''A section from a resource file.'''

  @staticmethod
  def Escape(text):
    '''Returns a version of 'text' with characters escaped that need to be
    for inclusion in a resource section.'''
    def Replace(match):
      return _ESCAPE_CHARS[match.group()]
    return _NEED_ESCAPE.sub(Replace, text)

  @staticmethod
  def UnEscape(text):
    '''Returns a version of 'text' with escaped characters unescaped.'''
    def Replace(match):
      return _UNESCAPE_CHARS[match.group()]
    return _NEED_UNESCAPE.sub(Replace, text)

  def _RegExpParse(self, rexp, text_to_parse):
    '''Overrides _RegExpParse to add shortcut group handling. Otherwise
    the same.
    '''
    super(Section, self)._RegExpParse(rexp, text_to_parse)
    # Put every clique in a shortcut group named after this section's first
    # textual ID, so shortcut-key clashes are checked per section.
    if not self.is_skeleton and len(self.GetTextualIds()) > 0:
      group_name = self.GetTextualIds()[0]
      for c in self.GetCliques():
        c.AddToShortcutGroup(group_name)

  def ReadSection(self):
    '''Reads the section into self.text_: from the first line matching
    self.extkey through the END that balances the outermost BEGIN.

    Raises:
      exception.SectionNotFound: if no line matches self.extkey.
    '''
    rc_text = self._LoadInputFile()
    out = ''
    begin_count = 0
    assert self.extkey
    first_line_re = re.compile(r'\s*' + self.extkey + r'\b')
    for line in rc_text.splitlines(True):
      # Accumulate only once the section's first line has been seen.
      if out or first_line_re.match(line):
        out += line
      # we stop once we reach the END for the outermost block.
      begin_count_was = begin_count
      if len(out) > 0 and line.strip() == 'BEGIN':
        begin_count += 1
      elif len(out) > 0 and line.strip() == 'END':
        begin_count -= 1
      if begin_count_was == 1 and begin_count == 0:
        break
    if len(out) == 0:
      raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))
    self.text_ = out.strip()
class Dialog(Section):
  '''A resource section that contains a dialog resource.'''

  # A typical dialog resource section looks like this:
  #
  # IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
  # STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
  # CAPTION "About"
  # FONT 8, "System", 0, 0, 0x0
  # BEGIN
  #   ICON            IDI_KLONK,IDC_MYICON,14,9,20,20
  #   LTEXT           "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
  #                   SS_NOPREFIX
  #   LTEXT           "Copyright (C) 2005",IDC_STATIC,49,20,119,8
  #   DEFPUSHBUTTON   "OK",IDOK,195,6,30,11,WS_GROUP
  #   CONTROL         "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
  #                   BS_AUTORADIOBUTTON,46,51,84,10
  # END

  # We are using a sorted set of keys, and we assume that the
  # group name used for descriptions (type) will come after the "text"
  # group in alphabetical order. We also assume that there cannot be
  # more than one description per regular expression match.
  # If that's not the case some descriptions will be clobbered.
  dialog_re_ = lazy_re.compile(r'''
    # The dialog's ID in the first line
    (?P<id1>[A-Z0-9_]+)\s+DIALOG(EX)?
    |
    # The caption of the dialog
    (?P<type1>CAPTION)\s+"(?P<text1>.*?([^"]|""))"\s
    |
    # Lines for controls that have text and an ID
    \s+(?P<type2>[A-Z]+)\s+"(?P<text2>.*?([^"]|"")?)"\s*,\s*(?P<id2>[A-Z0-9_]+)\s*,
    |
    # Lines for controls that have text only
    \s+(?P<type3>[A-Z]+)\s+"(?P<text3>.*?([^"]|"")?)"\s*,
    |
    # Lines for controls that reference other resources
    \s+[A-Z]+\s+[A-Z0-9_]+\s*,\s*(?P<id3>[A-Z0-9_]*[A-Z][A-Z0-9_]*)
    |
    # This matches "NOT SOME_STYLE" so that it gets consumed and doesn't get
    # matched by the next option (controls that have only an ID and then just
    # numbers)
    \s+NOT\s+[A-Z][A-Z0-9_]+
    |
    # Lines for controls that have only an ID and then just numbers
    \s+[A-Z]+\s+(?P<id4>[A-Z0-9_]*[A-Z][A-Z0-9_]*)\s*,
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse dialog resource sections.'''
    self.ReadSection()
    self._RegExpParse(self.dialog_re_, self.text_)
class Menu(Section):
  '''A resource section that contains a menu resource.'''

  # A typical menu resource section looks something like this:
  #
  # IDC_KLONK MENU
  # BEGIN
  #   POPUP "&File"
  #   BEGIN
  #     MENUITEM "E&xit", IDM_EXIT
  #     MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
  #     POPUP "gonk"
  #     BEGIN
  #       MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
  #     END
  #   END
  #   POPUP "&Help"
  #   BEGIN
  #     MENUITEM "&About ...", IDM_ABOUT
  #   END
  # END

  # Description used for the messages generated for menus, to explain to
  # the translators how to handle them.
  MENU_MESSAGE_DESCRIPTION = (
      'This message represents a menu. Each of the items appears in sequence '
      '(some possibly within sub-menus) in the menu. The XX01XX placeholders '
      'serve to separate items. Each item contains an & (ampersand) character '
      'in front of the keystroke that should be used as a shortcut for that item '
      'in the menu. Please make sure that no two items in the same menu share '
      'the same shortcut.'
  )

  # A dandy regexp to suck all the IDs and translateables out of a menu
  # resource
  menu_re_ = lazy_re.compile(r'''
    # Match the MENU ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+MENU
    |
    # Match the translateable caption for a popup menu
    POPUP\s+"(?P<text1>.*?([^"]|""))"\s
    |
    # Match the caption & ID of a MENUITEM
    MENUITEM\s+"(?P<text2>.*?([^"]|""))"\s*,\s*(?P<id2>[A-Z0-9_]+)
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse menu resource sections. Because it is important that
    menu shortcuts are unique within the menu, we return each menu as a single
    message with placeholders to break up the different menu items, rather than
    return a single message per menu item. we also add an automatic description
    with instructions for the translators.'''
    self.ReadSection()
    # Collecting into single_message_ makes _RegExpParse emit one message for
    # the whole menu instead of one per item.
    self.single_message_ = tclib.Message(description=self.MENU_MESSAGE_DESCRIPTION)
    self._RegExpParse(self.menu_re_, self.text_)
class Version(Section):
  '''A resource section that contains a VERSIONINFO resource.'''

  # A typical version info resource can look like this:
  #
  # VS_VERSION_INFO VERSIONINFO
  #  FILEVERSION 1,0,0,1
  #  PRODUCTVERSION 1,0,0,1
  #  FILEFLAGSMASK 0x3fL
  # #ifdef _DEBUG
  #  FILEFLAGS 0x1L
  # #else
  #  FILEFLAGS 0x0L
  # #endif
  #  FILEOS 0x4L
  #  FILETYPE 0x2L
  #  FILESUBTYPE 0x0L
  # BEGIN
  #     BLOCK "StringFileInfo"
  #     BEGIN
  #         BLOCK "040904e4"
  #         BEGIN
  #             VALUE "CompanyName", "TODO: <Company name>"
  #             VALUE "FileDescription", "TODO: <File description>"
  #             VALUE "FileVersion", "1.0.0.1"
  #             VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
  #             VALUE "InternalName", "res_format_test.dll"
  #             VALUE "OriginalFilename", "res_format_test.dll"
  #             VALUE "ProductName", "TODO: <Product name>"
  #             VALUE "ProductVersion", "1.0.0.1"
  #         END
  #     END
  #     BLOCK "VarFileInfo"
  #     BEGIN
  #         VALUE "Translation", 0x409, 1252
  #     END
  # END
  #
  #
  # In addition to the above fields, VALUE fields named "Comments" and
  # "LegalTrademarks" may also be translateable.

  version_re_ = lazy_re.compile(r'''
    # Match the ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+VERSIONINFO
    |
    # Match all potentially translateable VALUE sections
    \s+VALUE\s+"
    (
    CompanyName|FileDescription|LegalCopyright|
    ProductName|Comments|LegalTrademarks
    )",\s+"(?P<text1>.*?([^"]|""))"\s
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse VERSIONINFO resource sections.'''
    self.ReadSection()
    self._RegExpParse(self.version_re_, self.text_)

  # TODO(joi) May need to override the Translate() method to change the
  # "Translation" VALUE block to indicate the correct language code.
class RCData(Section):
  '''A resource section that contains some data .'''

  # A typical rcdataresource section looks like this:
  #
  # IDR_BLAH RCDATA { 1, 2, 3, 4 }

  dialog_re_ = lazy_re.compile(r'''
    ^(?P<id1>[A-Z0-9_]+)\s+RCDATA\s+(DISCARDABLE)?\s+\{.*?\}
    ''', re.MULTILINE | re.VERBOSE | re.DOTALL)

  def Parse(self):
    '''Implementation for resource types w/braces (not BEGIN/END)
    '''
    rc_text = self._LoadInputFile()
    out = ''
    begin_count = 0
    # Tracks whether any '{' has been seen, so single-line "{ ... }" sections
    # (which balance to zero immediately) still terminate the loop.
    openbrace_count = 0
    assert self.extkey
    first_line_re = re.compile(r'\s*' + self.extkey + r'\b')
    for line in rc_text.splitlines(True):
      if out or first_line_re.match(line):
        out += line
      # We stop once the braces balance (could happen in one line).
      begin_count_was = begin_count
      if len(out) > 0:
        openbrace_count += line.count('{')
        begin_count += line.count('{')
        begin_count -= line.count('}')
      if ((begin_count_was == 1 and begin_count == 0) or
         (openbrace_count > 0 and begin_count == 0)):
        break
    if len(out) == 0:
      raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))
    # NOTE: unlike Section.ReadSection, the text is not stripped here.
    self.text_ = out
    self._RegExpParse(self.dialog_re_, out)
class Accelerators(Section):
  '''An ACCELERATORS table.
  '''

  # A typical ACCELERATORS section looks like this:
  #
  # IDR_ACCELERATOR1 ACCELERATORS
  # BEGIN
  #   "^C",           ID_ACCELERATOR32770,    ASCII,  NOINVERT
  #   "^V",           ID_ACCELERATOR32771,    ASCII,  NOINVERT
  #   VK_INSERT,      ID_ACCELERATOR32772,    VIRTKEY, CONTROL, NOINVERT
  # END

  accelerators_re_ = lazy_re.compile(r'''
    # Match the ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+ACCELERATORS\s+
    |
    # Match accelerators specified as VK_XXX
    \s+VK_[A-Z0-9_]+,\s*(?P<id2>[A-Z0-9_]+)\s*,
    |
    # Match accelerators specified as e.g. "^C"
    \s+"[^"]*",\s+(?P<id3>[A-Z0-9_]+)\s*,
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse ACCELERATORS resource sections.'''
    self.ReadSection()
    self._RegExpParse(self.accelerators_re_, self.text_)

372
third_party/libwebrtc/tools/grit/grit/gather/rc_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,372 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.rc'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import rc
from grit import util
class RcUnittest(unittest.TestCase):
  '''Tests for the RC-file gatherers in grit.gather.rc.

  Uses assertEqual/assertTrue instead of the failUnless alias, which is
  deprecated and was removed in Python 3.12.
  '''

  part_we_want = '''IDC_KLONKACC ACCELERATORS
BEGIN
    "?",            IDM_ABOUT,              ASCII,  ALT
    "/",            IDM_ABOUT,              ASCII,  ALT
END'''

  def testSectionFromFile(self):
    buf = '''IDC_SOMETHINGELSE BINGO
BEGIN
    BLA BLA
    BLA BLA
END
%s
IDC_KLONK BINGOBONGO
BEGIN
    HONGO KONGO
END
''' % self.part_we_want
    f = StringIO(buf)
    out = rc.Section(f, 'IDC_KLONKACC')
    out.ReadSection()
    self.assertEqual(out.GetText(), self.part_we_want)

    # Same section read from the on-disk UTF-16 test file; compare ignoring
    # whitespace differences.
    out = rc.Section(util.PathFromRoot(r'grit/testdata/klonk.rc'),
                     'IDC_KLONKACC',
                     encoding='utf-16')
    out.ReadSection()
    out_text = out.GetText().replace('\t', '')
    out_text = out_text.replace(' ', '')
    self.part_we_want = self.part_we_want.replace(' ', '')
    self.assertEqual(out_text.strip(), self.part_we_want.strip())

  def testDialog(self):
    dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
    ICON            IDI_KLONK,IDC_MYICON,14,9,20,20
    LTEXT           "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
                    SS_NOPREFIX
    LTEXT           "Copyright (C) 2005",IDC_STATIC,49,20,119,8
    DEFPUSHBUTTON   "OK",IDOK,195,6,30,11,WS_GROUP
    CONTROL         "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
                    BS_AUTORADIOBUTTON,46,51,84,10
    // try a line where the ID is on the continuation line
    LTEXT           "blablablabla blablabla blablablablablablablabla blablabla",
                    ID_SMURF, whatever...
END
'''), 'IDD_ABOUTBOX')
    dlg.Parse()
    self.assertEqual(len(dlg.GetTextualIds()), 7)
    self.assertEqual(len(dlg.GetCliques()), 6)
    self.assertEqual(dlg.GetCliques()[1].GetMessage().GetRealContent(),
                     'klonk Version "yibbee" 1.0')

    transl = dlg.Translate('en')
    self.assertEqual(transl.strip(), dlg.GetText().strip())

  def testAlternateSkeleton(self):
    dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
    LTEXT           "Yipee skippy",IDC_STATIC,49,10,119,8,
                    SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
    dlg.Parse()

    alt_dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "XXXXXXXXX"
FONT 8, "System", 0, 0, 0x0
BEGIN
    LTEXT           "XXXXXXXXXXXXXXXXX",IDC_STATIC,110978,10,119,8,
                    SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
    alt_dlg.Parse()

    transl = dlg.Translate('en', skeleton_gatherer=alt_dlg)
    # The skeleton (non-translateable parts) must come from alt_dlg, the
    # translateable text from dlg.
    self.assertTrue(transl.count('040704') and
                    transl.count('110978'))
    self.assertTrue(transl.count('Yipee skippy'))

  def testMenu(self):
    menu = rc.Menu(StringIO('''IDC_KLONK MENU
BEGIN
    POPUP "&File """
    BEGIN
        MENUITEM "E&xit",                       IDM_EXIT
        MENUITEM "This be ""Klonk"" me like",   ID_FILE_THISBE
        POPUP "gonk"
        BEGIN
            MENUITEM "Klonk && is ""good""",    ID_GONK_KLONKIS
        END
        MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
                                                ID_FILE_THISISAVERYLONGMENUCAPTIONTOTRYTOSEEIFWECANMAKETHEIDGOTOACONTINUATIONLINE
    END
    POPUP "&Help"
    BEGIN
        MENUITEM "&About ...",                  IDM_ABOUT
    END
END'''), 'IDC_KLONK')

    menu.Parse()
    self.assertEqual(len(menu.GetTextualIds()), 6)
    # The whole menu is gathered as a single message with placeholders
    # separating the items.
    self.assertEqual(len(menu.GetCliques()), 1)
    self.assertEqual(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()),
                     9)

    transl = menu.Translate('en')
    self.assertEqual(transl.strip(), menu.GetText().strip())

  def testVersion(self):
    version = rc.Version(StringIO('''
VS_VERSION_INFO VERSIONINFO
 FILEVERSION 1,0,0,1
 PRODUCTVERSION 1,0,0,1
 FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
 FILEFLAGS 0x1L
#else
 FILEFLAGS 0x0L
#endif
 FILEOS 0x4L
 FILETYPE 0x2L
 FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "040904e4"
        BEGIN
            VALUE "CompanyName", "TODO: <Company name>"
            VALUE "FileDescription", "TODO: <File description>"
            VALUE "FileVersion", "1.0.0.1"
            VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
            VALUE "InternalName", "res_format_test.dll"
            VALUE "OriginalFilename", "res_format_test.dll"
            VALUE "ProductName", "TODO: <Product name>"
            VALUE "ProductVersion", "1.0.0.1"
        END
    END
    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1252
    END
END
'''.strip()), 'VS_VERSION_INFO')
    version.Parse()
    self.assertEqual(len(version.GetTextualIds()), 1)
    self.assertEqual(len(version.GetCliques()), 4)

    transl = version.Translate('en')
    self.assertEqual(transl.strip(), version.GetText().strip())

  def testRegressionDialogBox(self):
    dialog = rc.Dialog(StringIO('''
IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE DIALOGEX 0, 0, 205, 157
STYLE DS_SETFONT | DS_FIXEDSYS | WS_CHILD
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
    EDITTEXT        IDC_SIDEBAR_WEATHER_NEW_CITY,3,27,112,14,ES_AUTOHSCROLL
    DEFPUSHBUTTON   "Add Location",IDC_SIDEBAR_WEATHER_ADD,119,27,50,14
    LISTBOX         IDC_SIDEBAR_WEATHER_CURR_CITIES,3,48,127,89,
                    LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
    PUSHBUTTON      "Move Up",IDC_SIDEBAR_WEATHER_MOVE_UP,134,104,50,14
    PUSHBUTTON      "Move Down",IDC_SIDEBAR_WEATHER_MOVE_DOWN,134,121,50,14
    PUSHBUTTON      "Remove",IDC_SIDEBAR_WEATHER_DELETE,134,48,50,14
    LTEXT           "To see current weather conditions and forecasts in the USA, enter the zip code (example: 94043) or city and state (example: Mountain View, CA).",
                    IDC_STATIC,3,0,199,25
    CONTROL         "Fahrenheit",IDC_SIDEBAR_WEATHER_FAHRENHEIT,"Button",
                    BS_AUTORADIOBUTTON | WS_GROUP | WS_TABSTOP,3,144,51,10
    CONTROL         "Celsius",IDC_SIDEBAR_WEATHER_CELSIUS,"Button",
                    BS_AUTORADIOBUTTON,57,144,38,10
END'''.strip()), 'IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE')
    dialog.Parse()
    self.assertEqual(len(dialog.GetTextualIds()), 10)

  def testRegressionDialogBox2(self):
    dialog = rc.Dialog(StringIO('''
IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE DIALOG DISCARDABLE  0, 0, 264, 220
STYLE WS_CHILD
FONT 8, "MS Shell Dlg"
BEGIN
    GROUPBOX        "Email Filters",IDC_STATIC,7,3,250,190
    LTEXT           "Click Add Filter to create the email filter.",IDC_STATIC,16,41,130,9
    PUSHBUTTON      "Add Filter...",IDC_SIDEBAR_EMAIL_ADD_FILTER,196,38,50,14
    PUSHBUTTON      "Remove",IDC_SIDEBAR_EMAIL_REMOVE,196,174,50,14
    PUSHBUTTON      "",IDC_SIDEBAR_EMAIL_HIDDEN,200,178,5,5,NOT WS_VISIBLE
    LISTBOX         IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
                    LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
    LTEXT           "You can prevent certain emails from showing up in the sidebar with a filter.",
                    IDC_STATIC,16,18,234,18
END'''.strip()), 'IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE')
    dialog.Parse()
    # A control with an empty caption must still yield its ID.
    self.assertTrue('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds())

  def testRegressionMenuId(self):
    menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
    POPUP "HyperFolder"
    BEGIN
        MENUITEM "Open Containing Folder",      IDM_OPENFOLDER
    END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
    menu.Parse()
    self.assertEqual(len(menu.GetTextualIds()), 2)

  def testRegressionNewlines(self):
    menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
    POPUP "Hyper\\nFolder"
    BEGIN
        MENUITEM "Open Containing Folder",      IDM_OPENFOLDER
    END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
    menu.Parse()
    transl = menu.Translate('en')
    # Shouldn't find \\n (the \n shouldn't be changed to \\n)
    self.assertEqual(transl.find('\\\\n'), -1)

  def testRegressionTabs(self):
    menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
    POPUP "Hyper\\tFolder"
    BEGIN
        MENUITEM "Open Containing Folder",      IDM_OPENFOLDER
    END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
    menu.Parse()
    transl = menu.Translate('en')
    # Shouldn't find \\t (the \t shouldn't be changed to \\t)
    self.assertEqual(transl.find('\\\\t'), -1)

  def testEscapeUnescape(self):
    original = 'Hello "bingo"\n How\\are\\you\\n?'
    escaped = rc.Section.Escape(original)
    self.assertEqual(escaped, 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?')
    unescaped = rc.Section.UnEscape(escaped)
    self.assertEqual(unescaped, original)

  def testRegressionPathsWithSlashN(self):
    original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif'
    unescaped = rc.Section.UnEscape(original)
    self.assertEqual(unescaped, '..\\..\\trs\\res\\nav_first.gif')

  def testRegressionDialogItemsTextOnly(self):
    dialog = rc.Dialog(StringIO('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
    WS_DISABLED | WS_CAPTION | WS_SYSMENU
CAPTION "Search"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
    GROUPBOX        "Select search buttons and options",-1,7,5,266,262
    CONTROL         "",IDC_OPTIONS,"SysTreeView32",TVS_DISABLEDRAGDROP |
                    WS_BORDER | WS_TABSTOP | 0x800,16,19,248,218
    LTEXT           "Use Google site:",-1,26,248,52,8
    COMBOBOX        IDC_GOOGLE_HOME,87,245,177,256,CBS_DROPDOWNLIST |
                    WS_VSCROLL | WS_TABSTOP
    PUSHBUTTON      "Restore Defaults...",IDC_RESET,187,272,86,14
END'''), 'IDD_OPTIONS_SEARCH')
    dialog.Parse()
    translateables = [c.GetMessage().GetRealContent()
                      for c in dialog.GetCliques()]
    self.assertTrue('Select search buttons and options' in translateables)
    self.assertTrue('Use Google site:' in translateables)

  def testAccelerators(self):
    acc = rc.Accelerators(StringIO('''\
IDR_ACCELERATOR1 ACCELERATORS
BEGIN
    "^C",           ID_ACCELERATOR32770,    ASCII,  NOINVERT
    "^V",           ID_ACCELERATOR32771,    ASCII,  NOINVERT
    VK_INSERT,      ID_ACCELERATOR32772,    VIRTKEY, CONTROL, NOINVERT
END
'''), 'IDR_ACCELERATOR1')
    acc.Parse()
    self.assertEqual(len(acc.GetTextualIds()), 4)
    # Accelerator tables contain no translateable text.
    self.assertEqual(len(acc.GetCliques()), 0)

    transl = acc.Translate('en')
    self.assertEqual(transl.strip(), acc.GetText().strip())

  def testRegressionEmptyString(self):
    dlg = rc.Dialog(StringIO('''\
IDD_CONFIRM_QUIT_GD_DLG DIALOGEX 0, 0, 267, 108
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
    WS_CAPTION
EXSTYLE WS_EX_TOPMOST
CAPTION "Google Desktop"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
    DEFPUSHBUTTON   "&Yes",IDYES,82,87,50,14
    PUSHBUTTON      "&No",IDNO,136,87,50,14
    ICON            32514,IDC_STATIC,7,9,21,20
    EDITTEXT        IDC_TEXTBOX,34,7,231,60,ES_MULTILINE | ES_READONLY | NOT WS_BORDER
    CONTROL         "",
                    IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
                    WS_TABSTOP,33,70,231,10
END'''), 'IDD_CONFIRM_QUIT_GD_DLG')
    dlg.Parse()

    def Check():
      # The empty-caption control and the END line must survive every
      # translation mode.
      self.assertTrue(transl.count('IDC_ENABLE_GD_AUTOSTART'))
      self.assertTrue(transl.count('END'))

    transl = dlg.Translate('de', pseudo_if_not_available=True,
                           fallback_to_english=True)
    Check()
    transl = dlg.Translate('de', pseudo_if_not_available=True,
                           fallback_to_english=False)
    Check()
    transl = dlg.Translate('de', pseudo_if_not_available=False,
                           fallback_to_english=True)
    Check()
    transl = dlg.Translate('de', pseudo_if_not_available=False,
                           fallback_to_english=False)
    Check()
    transl = dlg.Translate('en', pseudo_if_not_available=True,
                           fallback_to_english=True)
    Check()
    transl = dlg.Translate('en', pseudo_if_not_available=True,
                           fallback_to_english=False)
    Check()
    transl = dlg.Translate('en', pseudo_if_not_available=False,
                           fallback_to_english=True)
    Check()
    transl = dlg.Translate('en', pseudo_if_not_available=False,
                           fallback_to_english=False)
    Check()
if __name__ == '__main__':
unittest.main()

82
third_party/libwebrtc/tools/grit/grit/gather/regexp.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,82 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A baseclass for simple gatherers based on regular expressions.
'''
from __future__ import print_function
from grit.gather import skeleton_gatherer
class RegexpGatherer(skeleton_gatherer.SkeletonGatherer):
  '''Common functionality of gatherers based on parsing using a single
  regular expression.
  '''

  # Maps RC control-type keywords to a human-readable description used as the
  # translator-facing message description.
  DescriptionMapping_ = {
    'CAPTION' : 'This is a caption for a dialog',
    'CHECKBOX' : 'This is a label for a checkbox',
    'CONTROL': 'This is the text on a control',
    'CTEXT': 'This is a label for a control',
    'DEFPUSHBUTTON': 'This is a button definition',
    'GROUPBOX': 'This is a label for a grouping',
    'ICON': 'This is a label for an icon',
    'LTEXT': 'This is the text for a label',
    'PUSHBUTTON': 'This is the text for a button',
  }

  # Contextualization elements. Used for adding additional information
  # to the message bundle description string from RC files.
  def AddDescriptionElement(self, string):
    '''Attaches *string* (or its mapped description) to the message that was
    most recently gathered.'''
    if string in self.DescriptionMapping_:
      description = self.DescriptionMapping_[string]
    else:
      description = string
    if self.single_message_:
      self.single_message_.SetDescription(description)
    else:
      # Only annotate if the previous match actually produced a translateable
      # chunk; the description applies to the last clique in skeleton_.
      if (self.translatable_chunk_):
        message = self.skeleton_[len(self.skeleton_) - 1].GetMessage()
        message.SetDescription(description)

  def _RegExpParse(self, regexp, text_to_parse):
    '''An implementation of Parse() that can be used for resource sections that
    can be parsed using a single multi-line regular expression.

    All translateables must be in named groups that have names starting with
    'text'. All textual IDs must be in named groups that have names starting
    with 'id'. All type definitions that can be included in the description
    field for contextualization purposes should have a name that starts with
    'type'.

    Args:
      regexp: re.compile('...', re.MULTILINE)
      text_to_parse:
    '''
    chunk_start = 0
    for match in regexp.finditer(text_to_parse):
      groups = match.groupdict()
      keys = sorted(groups.keys())
      self.translatable_chunk_ = False
      for group in keys:
        if group.startswith('id') and groups[group]:
          self._AddTextualId(groups[group])
        elif group.startswith('text') and groups[group]:
          # Everything between the previous translateable and this one is a
          # nontranslateable skeleton chunk.
          self._AddNontranslateableChunk(
            text_to_parse[chunk_start : match.start(group)])
          chunk_start = match.end(group)  # Next chunk will start after the match
          self._AddTranslateableChunk(groups[group])
        elif group.startswith('type') and groups[group]:
          # Add the description to the skeleton_ list. This works because
          # we are using a sort set of keys, and because we assume that the
          # group name used for descriptions (type) will come after the "text"
          # group in alphabetical order. We also assume that there cannot be
          # more than one description per regular expression match.
          self.AddDescriptionElement(groups[group])
    self._AddNontranslateableChunk(text_to_parse[chunk_start:])

    if self.single_message_:
      self.skeleton_.append(self.uberclique.MakeClique(self.single_message_))

Просмотреть файл

@ -0,0 +1,149 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A baseclass for simple gatherers that store their gathered resource in a
list.
'''
from __future__ import print_function
import six
from grit.gather import interface
from grit import clique
from grit import exception
from grit import tclib
class SkeletonGatherer(interface.GathererBase):
  '''Common functionality of gatherers that parse their input as a skeleton of
  translatable and nontranslatable chunks.
  '''

  def __init__(self, *args, **kwargs):
    super(SkeletonGatherer, self).__init__(*args, **kwargs)
    # List of parts of the document.  Translateable parts are
    # clique.MessageClique objects, nontranslateable parts are plain strings.
    # Translated messages are inserted back into the skeleton using the quoting
    # rules defined by self.Escape()
    self.skeleton_ = []
    # A list of the names of IDs that need to be defined for this resource
    # section to compile correctly.
    self.ids_ = []
    # True if Parse() has already been called.
    self.have_parsed_ = False
    # True if a translatable chunk has been added
    self.translatable_chunk_ = False
    # If not None, all parts of the document will be put into this single
    # message; otherwise the normal skeleton approach is used.
    self.single_message_ = None
    # Number to use for the next placeholder name.  Used only if single_message
    # is not None
    self.ph_counter_ = 1

  def GetText(self):
    '''Returns the original text of the section.

    NOTE(review): self.text_ is expected to be set by the subclass's Parse()
    implementation — confirm against concrete gatherers.
    '''
    return self.text_

  def Escape(self, text):
    '''Subclasses can override.  Base impl is identity.'''
    return text

  def UnEscape(self, text):
    '''Subclasses can override.  Base impl is identity.'''
    return text

  def GetTextualIds(self):
    '''Returns the list of textual IDs that need to be defined for this
    resource section to compile correctly.'''
    return self.ids_

  def _AddTextualId(self, id):
    # The parameter name shadows the 'id' builtin but is kept unchanged for
    # backwards compatibility with existing callers.
    self.ids_.append(id)

  def GetCliques(self):
    '''Returns the message cliques for each translateable message in the
    resource section.'''
    return [x for x in self.skeleton_ if isinstance(x, clique.MessageClique)]

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns this section translated into 'lang'.

    Nontranslateable parts come either from our own skeleton or, if
    'skeleton_gatherer' is provided, from its structurally identical skeleton.

    Args:
      lang: language code, e.g. 'en'.
      pseudo_if_not_available: use a pseudotranslation if no real one exists.
      skeleton_gatherer: optional gatherer supplying the skeleton strings.
      fallback_to_english: fall back to English when no translation exists.

    Raises:
      grit.exception.NotReady: if Parse() has not yet populated the skeleton.
    '''
    if len(self.skeleton_) == 0:
      raise exception.NotReady()
    if skeleton_gatherer:
      assert len(skeleton_gatherer.skeleton_) == len(self.skeleton_)

    out = []
    for ix, item in enumerate(self.skeleton_):
      if isinstance(item, six.string_types):
        if skeleton_gatherer:
          # Make sure the skeleton is like the original
          assert isinstance(skeleton_gatherer.skeleton_[ix], six.string_types)
          out.append(skeleton_gatherer.skeleton_[ix])
        else:
          out.append(item)
      else:
        if skeleton_gatherer:  # Make sure the skeleton is like the original
          assert not isinstance(skeleton_gatherer.skeleton_[ix],
                                six.string_types)
        msg = item.MessageForLanguage(lang,
                                      pseudo_if_not_available,
                                      fallback_to_english)
        # Pass the bound Escape method directly; the previous trivial
        # closure wrapper was redundant.
        out.append(msg.GetRealContent(escaping_function=self.Escape))
    return ''.join(out)

  def Parse(self):
    '''Parses the section.  Implemented by subclasses.  Idempotent.'''
    raise NotImplementedError()

  def _AddNontranslateableChunk(self, chunk):
    '''Adds a nontranslateable chunk.'''
    if self.single_message_:
      # In single-message mode nontranslateable text becomes a numbered
      # placeholder inside the one message.
      ph = tclib.Placeholder('XX%02dXX' % self.ph_counter_, chunk, chunk)
      self.ph_counter_ += 1
      self.single_message_.AppendPlaceholder(ph)
    else:
      self.skeleton_.append(chunk)

  def _AddTranslateableChunk(self, chunk):
    '''Adds a translateable chunk.  It will be unescaped before being added.'''
    # We don't want empty messages since they are redundant and the TC
    # doesn't allow them.
    if chunk == '':
      return

    unescaped_text = self.UnEscape(chunk)
    if self.single_message_:
      self.single_message_.AppendText(unescaped_text)
    else:
      self.skeleton_.append(self.uberclique.MakeClique(
        tclib.Message(text=unescaped_text)))
      self.translatable_chunk_ = True

  def SubstituteMessages(self, substituter):
    '''Applies substitutions to all messages in the tree.

    Goes through the skeleton and finds all MessageCliques.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    if self.single_message_:
      self.single_message_ = substituter.SubstituteMessage(self.single_message_)
    new_skel = []
    for chunk in self.skeleton_:
      if isinstance(chunk, clique.MessageClique):
        old_message = chunk.GetMessage()
        new_message = substituter.SubstituteMessage(old_message)
        if new_message is not old_message:
          # Only substituted messages need a fresh clique.
          new_skel.append(self.uberclique.MakeClique(new_message))
          continue
      new_skel.append(chunk)
    self.skeleton_ = new_skel

743
third_party/libwebrtc/tools/grit/grit/gather/tr_html.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,743 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A gatherer for the TotalRecall brand of HTML templates with replaceable
portions. We wanted to reuse extern.tclib.api.handlers.html.TCHTMLParser
but this proved impossible due to the fact that the TotalRecall HTML templates
are in general quite far from parseable HTML and the TCHTMLParser derives
from HTMLParser.HTMLParser which requires relatively well-formed HTML. Some
examples of "HTML" from the TotalRecall HTML templates that wouldn't be
parseable include things like:
<a [PARAMS]>blabla</a> (not parseable because attributes are invalid)
<table><tr><td>[LOTSOFSTUFF]</tr></table> (not parseable because closing
</td> is in the HTML [LOTSOFSTUFF]
is replaced by)
The other problem with using general parsers (such as TCHTMLParser) is that
we want to make sure we output the TotalRecall template with as little changes
as possible in terms of whitespace characters, layout etc. With any parser
that generates a parse tree, and generates output by dumping the parse tree,
we would always have little inconsistencies which could cause bugs (the
TotalRecall template stuff is quite brittle and can break if e.g. a tab
character is replaced with spaces).
The solution, which may be applicable to some other HTML-like template
languages floating around Google, is to create a parser with a simple state
machine that keeps track of what kind of tag it's inside, and whether it's in
a translateable section or not. Translateable sections are:
a) text (including [BINGO] replaceables) inside of tags that
can contain translateable text (which is all tags except
for a few)
b) text inside of an 'alt' attribute in an <image> element, or
the 'value' attribute of a <submit>, <button> or <text>
element.
The parser does not build up a parse tree but rather a "skeleton" which
is a list of nontranslateable strings intermingled with grit.clique.MessageClique
objects. This simplifies the parser considerably compared to a regular HTML
parser. To output a translated document, each item in the skeleton is
printed out, with the relevant Translation from each MessageCliques being used
for the requested language.
This implementation borrows some code, constants and ideas from
extern.tclib.api.handlers.html.TCHTMLParser.
'''
from __future__ import print_function
import re
import six
from grit import clique
from grit import exception
from grit import lazy_re
from grit import util
from grit import tclib
from grit.gather import interface
# HTML tags which break (separate) chunks.
_BLOCK_TAGS = ['script', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'br',
'body', 'style', 'head', 'title', 'table', 'tr', 'td', 'th',
'ul', 'ol', 'dl', 'nl', 'li', 'div', 'object', 'center',
'html', 'link', 'form', 'select', 'textarea',
'button', 'option', 'map', 'area', 'blockquote', 'pre',
'meta', 'xmp', 'noscript', 'label', 'tbody', 'thead',
'script', 'style', 'pre', 'iframe', 'img', 'input', 'nowrap',
'fieldset', 'legend']
# HTML tags which may appear within a chunk.
_INLINE_TAGS = ['b', 'i', 'u', 'tt', 'code', 'font', 'a', 'span', 'small',
'key', 'nobr', 'url', 'em', 's', 'sup', 'strike',
'strong']
# HTML tags within which linebreaks are significant.
_PREFORMATTED_TAGS = ['textarea', 'xmp', 'pre']
# An array mapping some of the inline HTML tags to more meaningful
# names for those tags. This will be used when generating placeholders
# representing these tags.
_HTML_PLACEHOLDER_NAMES = { 'a' : 'link', 'br' : 'break', 'b' : 'bold',
'i' : 'italic', 'li' : 'item', 'ol' : 'ordered_list', 'p' : 'paragraph',
'ul' : 'unordered_list', 'img' : 'image', 'em' : 'emphasis' }
# We append each of these characters in sequence to distinguish between
# different placeholders with basically the same name (e.g. BOLD1, BOLD2).
# Keep in mind that a placeholder name must not be a substring of any other
# placeholder name in the same message, so we can't simply count (BOLD_1
# would be a substring of BOLD_10).
_SUFFIXES = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Matches whitespace in an HTML document. Also matches HTML comments, which are
# treated as whitespace.
_WHITESPACE = lazy_re.compile(r'(\s|&nbsp;|\\n|\\r|<!--\s*desc\s*=.*?-->)+',
re.DOTALL)
# Matches whitespace sequences which can be folded into a single whitespace
# character. This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = lazy_re.compile(r'\s+')
# Finds a non-whitespace character
_NON_WHITESPACE = lazy_re.compile(r'\S')
# Matches two or more &nbsp; in a row (a single &nbsp is not changed into
# placeholders because different languages require different numbers of spaces
# and placeholders must match exactly; more than one is probably a "special"
# whitespace sequence and should be turned into a placeholder).
_NBSP = lazy_re.compile(r'&nbsp;(&nbsp;)+')
# Matches nontranslateable chunks of the document
_NONTRANSLATEABLES = lazy_re.compile(r'''
<\s*script.+?<\s*/\s*script\s*>
|
<\s*style.+?<\s*/\s*style\s*>
|
<!--.+?-->
|
<\?IMPORT\s.+?> # import tag
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
<\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches a tag and its attributes
_ELEMENT = lazy_re.compile(r'''
# Optional closing /, element name
<\s*(?P<closing>/)?\s*(?P<element>[a-zA-Z0-9]+)\s*
# Attributes and/or replaceables inside the tag, if any
(?P<atts>(
\s*([a-zA-Z_][-:.a-zA-Z_0-9]*) # Attribute name
(\s*=\s*(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?
|
\s*\[(\$?\~)?([A-Z0-9-_]+?)(\~\$?)?\]
)*)
\s*(?P<empty>/)?\s*> # Optional empty-tag closing /, and tag close
''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches elements that may have translateable attributes. The value of these
# special attributes is given by group 'value1' or 'value2'. Note that this
# regexp demands that the attribute value be quoted; this is necessary because
# the non-tree-building nature of the parser means we don't know when we're
# writing out attributes, so we wouldn't know to escape spaces.
_SPECIAL_ELEMENT = lazy_re.compile(r'''
<\s*(
input[^>]+?value\s*=\s*(\'(?P<value3>[^\']*)\'|"(?P<value4>[^"]*)")
[^>]+type\s*=\s*"?'?(button|reset|text|submit)'?"?
|
(
table[^>]+?title\s*=
|
img[^>]+?alt\s*=
|
input[^>]+?type\s*=\s*"?'?(button|reset|text|submit)'?"?[^>]+?value\s*=
)
\s*(\'(?P<value1>[^\']*)\'|"(?P<value2>[^"]*)")
)[^>]*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches stuff that is translateable if it occurs in the right context
# (between tags). This includes all characters and character entities.
# Note that this also matches &nbsp; which needs to be handled as whitespace
# before this regexp is applied.
_CHARACTERS = lazy_re.compile(r'''
(
\w
|
[\!\@\#\$\%\^\*\(\)\-\=\_\+\[\]\{\}\\\|\;\:\'\"\,\.\/\?\`\~]
|
&(\#[0-9]+|\#x[0-9a-fA-F]+|[A-Za-z0-9]+);
)+
''', re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches Total Recall's "replaceable" tags, which are just any text
# in capitals enclosed by delimiters like [] or [~~] or [$~~$] (e.g. [HELLO],
# [~HELLO~] and [$~HELLO~$]).
_REPLACEABLE = lazy_re.compile(r'\[(\$?\~)?(?P<name>[A-Z0-9-_]+?)(\~\$?)?\]',
re.MULTILINE)
# Matches the silly [!]-prefixed "header" that is used in some TotalRecall
# templates.
_SILLY_HEADER = lazy_re.compile(r'\[!\]\ntitle\t(?P<title>[^\n]+?)\n.+?\n\n',
re.MULTILINE | re.DOTALL)
# Matches a comment that provides a description for the message it occurs in.
_DESCRIPTION_COMMENT = lazy_re.compile(
r'<!--\s*desc\s*=\s*(?P<description>.+?)\s*-->', re.DOTALL)
# Matches a comment which is used to break apart multiple messages.
_MESSAGE_BREAK_COMMENT = lazy_re.compile(r'<!--\s*message-break\s*-->',
re.DOTALL)
# Matches a comment which is used to prevent block tags from splitting a message
_MESSAGE_NO_BREAK_COMMENT = re.compile(r'<!--\s*message-no-break\s*-->',
re.DOTALL)
_DEBUG = 0
def _DebugPrint(text):
if _DEBUG:
print(text.encode('utf-8'))
class HtmlChunks(object):
  '''A parser that knows how to break an HTML-like document into a list of
  chunks, where each chunk is either translateable or non-translateable.
  The chunks are unmodified sections of the original document, so concatenating
  the text of all chunks would result in the original document.'''

  def InTranslateable(self):
    '''True if the parser is currently inside a translateable chunk.'''
    return self.last_translateable != -1

  def Rest(self):
    '''Returns the not-yet-consumed tail of the document.'''
    return self.text_[self.current:]

  def StartTranslateable(self):
    '''Closes the pending nontranslateable chunk (if any) and marks the
    current position as the start of a translateable chunk.'''
    assert not self.InTranslateable()
    if self.current != 0:
      # Append a nontranslateable chunk
      chunk_text = self.text_[self.chunk_start : self.last_nontranslateable + 1]
      # Needed in the case where document starts with a translateable.
      if len(chunk_text) > 0:
        self.AddChunk(False, chunk_text)
      self.chunk_start = self.last_nontranslateable + 1
    self.last_translateable = self.current
    self.last_nontranslateable = -1

  def EndTranslateable(self):
    '''Emits the current translateable chunk and switches the parser into
    nontranslateable mode at the current position.'''
    assert self.InTranslateable()
    # Append a translateable chunk
    self.AddChunk(True,
                  self.text_[self.chunk_start : self.last_translateable + 1])
    self.chunk_start = self.last_translateable + 1
    self.last_translateable = -1
    self.last_nontranslateable = self.current

  def AdvancePast(self, match):
    '''Moves the current position past the given regex match (the match was
    made against self.Rest(), so its end is relative to self.current).'''
    self.current += match.end()

  def AddChunk(self, translateable, text):
    '''Adds a chunk to self, removing linebreaks and duplicate whitespace
    if appropriate.
    '''
    m = _DESCRIPTION_COMMENT.search(text)
    if m:
      self.last_description = m.group('description')
      # Remove the description from the output text
      text = _DESCRIPTION_COMMENT.sub('', text)
    m = _MESSAGE_BREAK_COMMENT.search(text)
    if m:
      # Remove the comment from the output text.  It should already effectively
      # break apart messages.
      text = _MESSAGE_BREAK_COMMENT.sub('', text)
    if translateable and not self.last_element_ in _PREFORMATTED_TAGS:
      if self.fold_whitespace_:
        # Fold whitespace sequences if appropriate.  This is optional because it
        # alters the output strings.
        text = _FOLD_WHITESPACE.sub(' ', text)
      else:
        text = text.replace('\n', ' ')
        text = text.replace('\r', ' ')
        # This whitespace folding doesn't work in all cases, thus the
        # fold_whitespace flag to support backwards compatibility.
        text = text.replace('  ', ' ')
        text = text.replace('  ', ' ')
    if translateable:
      # Consume the pending description; it applies to this chunk only.
      description = self.last_description
      self.last_description = ''
    else:
      description = ''
    if text != '':
      self.chunks_.append((translateable, text, description))

  def Parse(self, text, fold_whitespace):
    '''Parses self.text_ into an intermediate format stored in self.chunks_
    which is translateable and nontranslateable chunks.  Also returns
    self.chunks_

    Args:
      text: The HTML for parsing.
      fold_whitespace: Whether whitespace sequences should be folded into a
        single space.

    Return:
      [chunk1, chunk2, chunk3, ...]  (tuples of (translateable, text,
      description))
    '''
    #
    # Chunker state
    #
    self.text_ = text
    self.fold_whitespace_ = fold_whitespace
    # A list of tuples (is_translateable, text) which represents the document
    # after chunking.
    self.chunks_ = []
    # Start index of the last chunk, whether translateable or not
    self.chunk_start = 0
    # Index of the last for-sure translateable character if we are parsing
    # a translateable chunk, -1 to indicate we are not in a translateable chunk.
    # This is needed so that we don't include trailing whitespace in the
    # translateable chunk (whitespace is neutral).
    self.last_translateable = -1
    # Index of the last for-sure nontranslateable character if we are parsing
    # a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
    # This is needed to make sure we can group e.g. "<b>Hello</b> there"
    # together instead of just "Hello</b> there" which would be much worse
    # for translation.
    self.last_nontranslateable = -1
    # Index of the character we're currently looking at.
    self.current = 0
    # The name of the last block element parsed.
    self.last_element_ = ''
    # The last explicit description we found.
    self.last_description = ''
    # Whether no-break was the last chunk seen
    self.last_nobreak = False

    while self.current < len(self.text_):
      _DebugPrint('REST: %s' % self.text_[self.current:self.current+60])

      m = _MESSAGE_NO_BREAK_COMMENT.match(self.Rest())
      if m:
        self.AdvancePast(m)
        self.last_nobreak = True
        continue

      # Try to match whitespace
      m = _WHITESPACE.match(self.Rest())
      if m:
        # Whitespace is neutral, it just advances 'current' and does not switch
        # between translateable/nontranslateable.  If we are in a
        # nontranslateable section that extends to the current point, we extend
        # it to include the whitespace.  If we are in a translateable section,
        # we do not extend it until we find
        # more translateable parts, because we never want a translateable chunk
        # to end with whitespace.
        if (not self.InTranslateable() and
            self.last_nontranslateable == self.current - 1):
          self.last_nontranslateable = self.current + m.end() - 1
        self.AdvancePast(m)
        continue

      # Then we try to match nontranslateables
      m = _NONTRANSLATEABLES.match(self.Rest())
      if m:
        if self.InTranslateable():
          self.EndTranslateable()
        self.last_nontranslateable = self.current + m.end() - 1
        self.AdvancePast(m)
        continue

      # Now match all other HTML element tags (opening, closing, or empty, we
      # don't care).
      m = _ELEMENT.match(self.Rest())
      if m:
        element_name = m.group('element').lower()
        if element_name in _BLOCK_TAGS:
          self.last_element_ = element_name
          if self.InTranslateable():
            if self.last_nobreak:
              self.last_nobreak = False
            else:
              self.EndTranslateable()

          # Check for "special" elements, i.e. ones that have a translateable
          # attribute, and handle them correctly.  Note that all of the
          # "special" elements are block tags, so no need to check for this
          # if the tag is not a block tag.
          sm = _SPECIAL_ELEMENT.match(self.Rest())
          if sm:
            # Get the appropriate group name
            for group in sm.groupdict():
              if sm.groupdict()[group]:
                break

            # First make a nontranslateable chunk up to and including the
            # quote before the translateable attribute value
            self.AddChunk(False, self.text_[
              self.chunk_start : self.current + sm.start(group)])
            # Then a translateable for the translateable bit
            self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
            # Finally correct the data invariant for the parser
            self.chunk_start = self.current + sm.end(group)

          self.last_nontranslateable = self.current + m.end() - 1
        elif self.InTranslateable():
          # We're in a translateable and the tag is an inline tag, so we
          # need to include it in the translateable.
          self.last_translateable = self.current + m.end() - 1

        self.AdvancePast(m)
        continue

      # Anything else we find must be translateable, so we advance one character
      # at a time until one of the above matches.
      if not self.InTranslateable():
        self.StartTranslateable()
      else:
        self.last_translateable = self.current
      self.current += 1

    # Close the final chunk
    if self.InTranslateable():
      self.AddChunk(True, self.text_[self.chunk_start : ])
    else:
      self.AddChunk(False, self.text_[self.chunk_start : ])

    return self.chunks_
def HtmlToMessage(html, include_block_tags=False, description=''):
  '''Takes a bit of HTML, which must contain only "inline" HTML elements,
  and changes it into a tclib.Message.  This involves escaping any entities and
  replacing any HTML code with placeholders.

  If include_block_tags is true, no error will be given if block tags (e.g.
  <p> or <br>) are included in the HTML.

  Args:
    html: 'Hello <b>[USERNAME]</b>, how&nbsp;<i>are</i> you?'
    include_block_tags: False

  Return:
    tclib.Message('Hello START_BOLD1USERNAMEEND_BOLD, '
                  'howNBSPSTART_ITALICareEND_ITALIC you?',
                  [ Placeholder('START_BOLD', '<b>', ''),
                    Placeholder('USERNAME', '[USERNAME]', ''),
                    Placeholder('END_BOLD', '</b>', ''),
                    Placeholder('START_ITALIC', '<i>', ''),
                    Placeholder('END_ITALIC', '</i>', ''), ])

  Raises:
    exception.BlockTagInTranslateableChunk: if a block tag appears and
      include_block_tags is false (unless preceded by a message-no-break
      comment).
  '''
  # Approach is:
  # - first placeholderize, finding <elements>, [REPLACEABLES] and &nbsp;
  # - then escape all character entities in text in-between placeholders
  parts = []  # List of strings (for text chunks) and tuples (ID, original)
              # for placeholders
  count_names = {}  # Map of base names to number of times used
  end_names = {}  # Map of base names to stack of end tags (for correct nesting)

  def MakeNameClosure(base, type = ''):
    '''Returns a closure that can be called once all names have been allocated
    to return the final name of the placeholder.  This allows us to minimally
    number placeholders for non-overlap.

    Also ensures that END_XXX_Y placeholders have the same Y as the
    corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
    type.

    Args:
      base: 'phname'
      type: '' | 'begin' | 'end'

    Return:
      Closure()
    '''
    name = base.upper()
    if type != '':
      name = ('%s_%s' % (type, base)).upper()
    count_names.setdefault(name, 0)
    count_names[name] += 1

    # Note: 'name_' and 'index' are deliberately bound as default arguments
    # so the closure captures their values at creation time, not at call time.
    def MakeFinalName(name_ = name, index = count_names[name] - 1):
      if type.lower() == 'end' and end_names.get(base):
        return end_names[base].pop(-1)  # For correct nesting
      if count_names[name_] != 1:
        name_ = '%s_%s' % (name_, _SUFFIXES[index])
        # We need to use a stack to ensure that the end-tag suffixes match
        # the begin-tag suffixes.  Only needed when more than one tag of the
        # same type.
        if type == 'begin':
          end_name = ('END_%s_%s' % (base, _SUFFIXES[index])).upper()
          if base in end_names:
            end_names[base].append(end_name)
          else:
            end_names[base] = [end_name]
      return name_

    return MakeFinalName

  current = 0
  last_nobreak = False

  while current < len(html):
    m = _MESSAGE_NO_BREAK_COMMENT.match(html[current:])
    if m:
      last_nobreak = True
      current += m.end()
      continue

    # Multiple &nbsp; in a row become a single placeholder.
    m = _NBSP.match(html[current:])
    if m:
      parts.append((MakeNameClosure('SPACE'), m.group()))
      current += m.end()
      continue

    m = _REPLACEABLE.match(html[current:])
    if m:
      # Replaceables allow - but placeholders don't, so replace - with _
      ph_name = MakeNameClosure('X_%s_X' % m.group('name').replace('-', '_'))
      parts.append((ph_name, m.group()))
      current += m.end()
      continue

    m = _SPECIAL_ELEMENT.match(html[current:])
    if m:
      if not include_block_tags:
        if last_nobreak:
          last_nobreak = False
        else:
          raise exception.BlockTagInTranslateableChunk(html)
      element_name = 'block'  # for simplification
      # Get the appropriate group name
      for group in m.groupdict():
        if m.groupdict()[group]:
          break
      # The attribute value stays translateable text; the tag text around it
      # becomes begin/end placeholders.
      parts.append((MakeNameClosure(element_name, 'begin'),
                    html[current : current + m.start(group)]))
      parts.append(m.group(group))
      parts.append((MakeNameClosure(element_name, 'end'),
                    html[current + m.end(group) : current + m.end()]))
      current += m.end()
      continue

    m = _ELEMENT.match(html[current:])
    if m:
      element_name = m.group('element').lower()
      if not include_block_tags and not element_name in _INLINE_TAGS:
        if last_nobreak:
          last_nobreak = False
        else:
          raise exception.BlockTagInTranslateableChunk(html[current:])
      if element_name in _HTML_PLACEHOLDER_NAMES:  # use meaningful names
        element_name = _HTML_PLACEHOLDER_NAMES[element_name]
      # Make a name for the placeholder
      type = ''
      if not m.group('empty'):
        if m.group('closing'):
          type = 'end'
        else:
          type = 'begin'
      parts.append((MakeNameClosure(element_name, type), m.group()))
      current += m.end()
      continue

    # Plain text: accumulate character-by-character onto the previous text
    # part, or start a new one.
    if len(parts) and isinstance(parts[-1], six.string_types):
      parts[-1] += html[current]
    else:
      parts.append(html[current])
    current += 1

  # Resolve the placeholder-name closures now that all names are allocated.
  msg_text = ''
  placeholders = []
  for part in parts:
    if isinstance(part, tuple):
      final_name = part[0]()
      original = part[1]
      msg_text += final_name
      placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
    else:
      msg_text += part

  msg = tclib.Message(text=msg_text, placeholders=placeholders,
                      description=description)
  content = msg.GetContent()
  for ix in range(len(content)):
    if isinstance(content[ix], six.string_types):
      content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)

  return msg
class TrHtml(interface.GathererBase):
  '''Represents a document or message in the template format used by
  Total Recall for HTML documents.'''

  def __init__(self, *args, **kwargs):
    super(TrHtml, self).__init__(*args, **kwargs)
    self.have_parsed_ = False
    self.skeleton_ = []  # list of strings and MessageClique objects
    self.fold_whitespace_ = False

  def SetAttributes(self, attrs):
    '''Sets node attributes used by the gatherer.

    This checks the fold_whitespace attribute.

    Args:
      attrs: The mapping of node attributes.
    '''
    self.fold_whitespace_ = ('fold_whitespace' in attrs and
                             attrs['fold_whitespace'] == 'true')

  def GetText(self):
    '''Returns the original text of the HTML document'''
    return self.text_

  def GetTextualIds(self):
    '''Returns the document's single textual ID (its external key).'''
    return [self.extkey]

  def GetCliques(self):
    '''Returns the message cliques for each translateable message in the
    document.'''
    return [x for x in self.skeleton_ if isinstance(x, clique.MessageClique)]

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns this document with translateable messages filled with
    the translation for language 'lang'.

    Args:
      lang: 'en'
      pseudo_if_not_available: True

    Return:
      The translated document as a single string.

    Raises:
      grit.exception.NotReady() if used before Parse() has been successfully
        called.
      grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' is false
        and there is no translation for the requested language.
    '''
    if not self.skeleton_:
      raise exception.NotReady()

    # TODO(joi) Implement support for skeleton gatherers here.
    out = []
    for item in self.skeleton_:
      if isinstance(item, six.string_types):
        out.append(item)
        continue
      msg = item.MessageForLanguage(lang,
                                    pseudo_if_not_available,
                                    fallback_to_english)
      for content in msg.GetContent():
        if isinstance(content, tclib.Placeholder):
          # Placeholders (HTML code) are emitted verbatim.
          out.append(content.GetOriginal())
        else:
          # We escape " characters to increase the chance that attributes
          # will be properly escaped.
          out.append(util.EscapeHtml(content, True))

    return ''.join(out)

  def Parse(self):
    '''Loads and parses the input document into the skeleton.  Idempotent.'''
    if self.have_parsed_:
      return
    self.have_parsed_ = True

    text = self._LoadInputFile()
    # Ignore the BOM character if the document starts with one.
    if text.startswith(u'\ufeff'):
      text = text[1:]
    self.text_ = text

    # Parsing is done in two phases:  First, we break the document into
    # translateable and nontranslateable chunks.  Second, we run through each
    # translateable chunk and insert placeholders for any HTML elements,
    # unescape escaped characters, etc.

    # First handle the silly little [!]-prefixed header because it's not
    # handled by our HTML parsers.
    m = _SILLY_HEADER.match(text)
    if m:
      self.skeleton_.append(text[:m.start('title')])
      self.skeleton_.append(self.uberclique.MakeClique(
        tclib.Message(text=text[m.start('title'):m.end('title')])))
      self.skeleton_.append(text[m.end('title') : m.end()])
      text = text[m.end():]

    chunks = HtmlChunks().Parse(text, self.fold_whitespace_)

    for chunk in chunks:
      if chunk[0]:  # Chunk is translateable
        self.skeleton_.append(self.uberclique.MakeClique(
          HtmlToMessage(chunk[1], description=chunk[2])))
      else:
        self.skeleton_.append(chunk[1])

    # Go through the skeleton and change any messages that consist solely of
    # placeholders and whitespace into nontranslateable strings.
    for ix, item in enumerate(self.skeleton_):
      if not isinstance(item, clique.MessageClique):
        continue
      msg = item.GetMessage()
      got_text = any(
          isinstance(part, six.string_types)
          and _NON_WHITESPACE.search(part) and part != '&nbsp;'
          for part in msg.GetContent())
      if not got_text:
        self.skeleton_[ix] = msg.GetRealContent()

  def SubstituteMessages(self, substituter):
    '''Applies substitutions to all messages in the tree.

    Goes through the skeleton and finds all MessageCliques.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    new_skel = []
    for chunk in self.skeleton_:
      if isinstance(chunk, clique.MessageClique):
        old_message = chunk.GetMessage()
        new_message = substituter.SubstituteMessage(old_message)
        if new_message is not old_message:
          # Only substituted messages need a fresh clique.
          new_skel.append(self.uberclique.MakeClique(new_message))
          continue
      new_skel.append(chunk)
    self.skeleton_ = new_skel

524
third_party/libwebrtc/tools/grit/grit/gather/tr_html_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,524 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.tr_html'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import six
from six import StringIO
from grit.gather import tr_html
from grit import clique
from grit import util
class ParserUnittest(unittest.TestCase):
  def testChunkingWithoutFoldWhitespace(self):
    '''Runs all chunking checks with whitespace folding disabled.'''
    self.VerifyChunking(False)
  def testChunkingWithFoldWhitespace(self):
    '''Runs all chunking checks with whitespace folding enabled.'''
    self.VerifyChunking(True)
  def VerifyChunking(self, fold_whitespace):
    """Use a single function to run all chunking testing.

    This makes it easier to run chunking with fold_whitespace both on and off,
    to make sure the outputs are the same.

    Args:
      fold_whitespace: Whether whitespace sequences should be folded into a
        single space.
    """
    # Each helper below exercises one aspect of HtmlChunks.Parse; all of them
    # are expected to produce identical chunk lists in both folding modes.
    self.VerifyChunkingBasic(fold_whitespace)
    self.VerifyChunkingDescriptions(fold_whitespace)
    self.VerifyChunkingReplaceables(fold_whitespace)
    self.VerifyChunkingLineBreaks(fold_whitespace)
    self.VerifyChunkingMessageBreak(fold_whitespace)
    self.VerifyChunkingMessageNoBreak(fold_whitespace)
def VerifyChunkingBasic(self, fold_whitespace):
p = tr_html.HtmlChunks()
chunks = p.Parse('<p>Hello <b>dear</b> how <i>are</i>you?<p>Fine!',
fold_whitespace)
self.failUnlessEqual(chunks, [
(False, '<p>', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, '<p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear</b> how <i>are</i>you? <p>Fine!',
fold_whitespace)
self.failUnlessEqual(chunks, [
(False, '<p> ', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, ' <p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear how <i>are you? <p> Fine!',
fold_whitespace)
self.failUnlessEqual(chunks, [
(False, '<p> ', ''), (True, 'Hello <b>dear how <i>are you?', ''),
(False, ' <p> ', ''), (True, 'Fine!', '')])
# Ensure translateable sections that start with inline tags contain
# the starting inline tag.
chunks = p.Parse('<b>Hello!</b> how are you?<p><i>I am fine.</i>',
fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<b>Hello!</b> how are you?', ''), (False, '<p>', ''),
(True, '<i>I am fine.</i>', '')])
# Ensure translateable sections that end with inline tags contain
# the ending inline tag.
chunks = p.Parse("Hello! How are <b>you?</b><p><i>I'm fine!</i>",
fold_whitespace)
self.failUnlessEqual(chunks, [
(True, 'Hello! How are <b>you?</b>', ''), (False, '<p>', ''),
(True, "<i>I'm fine!</i>", '')])
def VerifyChunkingDescriptions(self, fold_whitespace):
p = tr_html.HtmlChunks()
# Check capitals and explicit descriptions
chunks = p.Parse('<!-- desc=bingo! --><B>Hello!</B> how are you?<P>'
'<I>I am fine.</I>', fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
chunks = p.Parse('<B><!-- desc=bingo! -->Hello!</B> how are you?<P>'
'<I>I am fine.</I>', fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# Linebreaks get handled by the tclib message.
chunks = p.Parse('<B>Hello!</B> <!-- desc=bi\nngo\n! -->how are you?<P>'
'<I>I am fine.</I>', fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<B>Hello!</B> how are you?', 'bi\nngo\n!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# In this case, because the explicit description appears after the first
# translateable, it will actually apply to the second translateable.
chunks = p.Parse('<B>Hello!</B> how are you?<!-- desc=bingo! --><P>'
'<I>I am fine.</I>', fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<B>Hello!</B> how are you?', ''), (False, '<P>', ''),
(True, '<I>I am fine.</I>', 'bingo!')])
def VerifyChunkingReplaceables(self, fold_whitespace):
# Check that replaceables within block tags (where attributes would go) are
# handled correctly.
p = tr_html.HtmlChunks()
chunks = p.Parse('<b>Hello!</b> how are you?<p [BINGO] [$~BONGO~$]>'
'<i>I am fine.</i>', fold_whitespace)
self.failUnlessEqual(chunks, [
(True, '<b>Hello!</b> how are you?', ''),
(False, '<p [BINGO] [$~BONGO~$]>', ''),
(True, '<i>I am fine.</i>', '')])
def VerifyChunkingLineBreaks(self, fold_whitespace):
# Check that the contents of preformatted tags preserve line breaks.
p = tr_html.HtmlChunks()
chunks = p.Parse('<textarea>Hello\nthere\nhow\nare\nyou?</textarea>',
fold_whitespace)
self.failUnlessEqual(chunks, [(False, '<textarea>', ''),
(True, 'Hello\nthere\nhow\nare\nyou?', ''), (False, '</textarea>', '')])
# ...and that other tags' line breaks are converted to spaces
chunks = p.Parse('<p>Hello\nthere\nhow\nare\nyou?</p>', fold_whitespace)
self.failUnlessEqual(chunks, [(False, '<p>', ''),
(True, 'Hello there how are you?', ''), (False, '</p>', '')])
def VerifyChunkingMessageBreak(self, fold_whitespace):
p = tr_html.HtmlChunks()
# Make sure that message-break comments work properly.
chunks = p.Parse('Break<!-- message-break --> apart '
'<!--message-break-->messages', fold_whitespace)
self.failUnlessEqual(chunks, [(True, 'Break', ''),
(False, ' ', ''),
(True, 'apart', ''),
(False, ' ', ''),
(True, 'messages', '')])
# Make sure message-break comments work in an inline tag.
chunks = p.Parse('<a href=\'google.com\'><!-- message-break -->Google'
'<!--message-break--></a>', fold_whitespace)
self.failUnlessEqual(chunks, [(False, '<a href=\'google.com\'>', ''),
(True, 'Google', ''),
(False, '</a>', '')])
def VerifyChunkingMessageNoBreak(self, fold_whitespace):
p = tr_html.HtmlChunks()
# Make sure that message-no-break comments work properly.
chunks = p.Parse('Please <!-- message-no-break --> <br />don\'t break',
fold_whitespace)
self.failUnlessEqual(chunks, [(True, 'Please <!-- message-no-break --> '
'<br />don\'t break', '')])
chunks = p.Parse('Please <br /> break. <!-- message-no-break --> <br /> '
'But not this time.', fold_whitespace)
self.failUnlessEqual(chunks, [(True, 'Please', ''),
(False, ' <br /> ', ''),
(True, 'break. <!-- message-no-break --> '
'<br /> But not this time.', '')])
def testTranslateableAttributes(self):
p = tr_html.HtmlChunks()
# Check that the translateable attributes in <img>, <submit>, <button> and
# <text> elements buttons are handled correctly.
chunks = p.Parse('<img src=bingo.jpg alt="hello there">'
'<input type=submit value="hello">'
'<input type="button" value="hello">'
'<input type=\'text\' value=\'Howdie\'>', False)
self.failUnlessEqual(chunks, [
(False, '<img src=bingo.jpg alt="', ''), (True, 'hello there', ''),
(False, '"><input type=submit value="', ''), (True, 'hello', ''),
(False, '"><input type="button" value="', ''), (True, 'hello', ''),
(False, '"><input type=\'text\' value=\'', ''), (True, 'Howdie', ''),
(False, '\'>', '')])
def testTranslateableHtmlToMessage(self):
msg = tr_html.HtmlToMessage(
'Hello <b>[USERNAME]</b>, &lt;how&gt;&nbsp;<i>are</i> you?')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, '
'<how>&nbsp;BEGIN_ITALICareEND_ITALIC you?')
msg = tr_html.HtmlToMessage('<b>Hello</b><I>Hello</I><b>Hello</b>')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_BOLD_1HelloEND_BOLD_1BEGIN_ITALICHelloEND_ITALIC'
'BEGIN_BOLD_2HelloEND_BOLD_2')
# Check that nesting (of the <font> tags) is handled correctly - i.e. that
# the closing placeholder numbers match the opening placeholders.
msg = tr_html.HtmlToMessage(
'''<font size=-1><font color=#FF0000>Update!</font> '''
'''<a href='http://desktop.google.com/whatsnew.html?hl=[$~LANG~$]'>'''
'''New Features</a>: Now search PDFs, MP3s, Firefox web history, and '''
'''more</font>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_FONT_1BEGIN_FONT_2Update!END_FONT_2 BEGIN_LINK'
'New FeaturesEND_LINK: Now search PDFs, MP3s, Firefox '
'web history, and moreEND_FONT_1')
msg = tr_html.HtmlToMessage('''<a href='[$~URL~$]'><b>[NUM][CAT]</b></a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres == 'BEGIN_LINKBEGIN_BOLDX_NUM_XX_CAT_XEND_BOLDEND_LINK')
msg = tr_html.HtmlToMessage(
'''<font size=-1><a class=q onClick='return window.qs?qs(this):1' '''
'''href='http://[WEBSERVER][SEARCH_URI]'>Desktop</a></font>&nbsp;&nbsp;'''
'''&nbsp;&nbsp;''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'''BEGIN_FONTBEGIN_LINKDesktopEND_LINKEND_FONTSPACE''')
msg = tr_html.HtmlToMessage(
'''<br><br><center><font size=-2>&copy;2005 Google </font></center>''', 1)
pres = msg.GetPresentableContent()
self.failUnless(pres ==
u'BEGIN_BREAK_1BEGIN_BREAK_2BEGIN_CENTERBEGIN_FONT\xa92005'
u' Google END_FONTEND_CENTER')
msg = tr_html.HtmlToMessage(
'''&nbsp;-&nbsp;<a class=c href=[$~CACHE~$]>Cached</a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'&nbsp;-&nbsp;BEGIN_LINKCachedEND_LINK')
# Check that upper-case tags are handled correctly.
msg = tr_html.HtmlToMessage(
'''You can read the <A HREF='http://desktop.google.com/privacypolicy.'''
'''html?hl=[LANG_CODE]'>Privacy Policy</A> and <A HREF='http://desktop'''
'''.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'You can read the BEGIN_LINK_1Privacy PolicyEND_LINK_1 and '
'BEGIN_LINK_2Privacy FAQEND_LINK_2 online.')
# Check that tags with linebreaks immediately preceding them are handled
# correctly.
msg = tr_html.HtmlToMessage(
'''You can read the
<A HREF='http://desktop.google.com/privacypolicy.html?hl=[LANG_CODE]'>Privacy Policy</A>
and <A HREF='http://desktop.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres == '''You can read the
BEGIN_LINK_1Privacy PolicyEND_LINK_1
and BEGIN_LINK_2Privacy FAQEND_LINK_2 online.''')
# Check that message-no-break comments are handled correctly.
msg = tr_html.HtmlToMessage('''Please <!-- message-no-break --><br /> don't break''')
pres = msg.GetPresentableContent()
self.failUnlessEqual(pres, '''Please BREAK don't break''')
class TrHtmlUnittest(unittest.TestCase):
  """Unit tests for the TrHtml gatherer.

  Modernized: the failUnless/failUnlessEqual/failIf assertion aliases were
  deprecated in Python 2.7 and removed in Python 3.12, so the modern
  assertTrue/assertEqual/assertFalse spellings are used.  A local variable
  that shadowed the builtin 'list' has also been renamed.  Test behavior is
  unchanged.
  """

  def testSetAttributes(self):
    html = tr_html.TrHtml(StringIO(''))
    self.assertEqual(html.fold_whitespace_, False)
    html.SetAttributes({})
    self.assertEqual(html.fold_whitespace_, False)
    html.SetAttributes({'fold_whitespace': 'false'})
    self.assertEqual(html.fold_whitespace_, False)
    html.SetAttributes({'fold_whitespace': 'true'})
    self.assertEqual(html.fold_whitespace_, True)

  def testFoldWhitespace(self):
    # NOTE(review): the two expected strings below are identical here; the
    # upstream fixture presumably used runs of spaces that folding collapses.
    # Verify against upstream if this transcription looks vacuous.
    text = '<td> Test Message </td>'
    html = tr_html.TrHtml(StringIO(text))
    html.Parse()
    self.assertEqual(html.skeleton_[1].GetMessage().GetPresentableContent(),
                     'Test Message')
    html = tr_html.TrHtml(StringIO(text))
    html.fold_whitespace_ = True
    html.Parse()
    self.assertEqual(html.skeleton_[1].GetMessage().GetPresentableContent(),
                     'Test Message')

  def testTable(self):
    html = tr_html.TrHtml(StringIO('''<table class="shaded-header"><tr>
<td class="header-element b expand">Preferences</td>
<td class="header-element s">
<a href="http://desktop.google.com/preferences.html">Preferences&nbsp;Help</a>
</td>
</tr></table>'''))
    html.Parse()
    self.assertEqual(html.skeleton_[3].GetMessage().GetPresentableContent(),
                     'BEGIN_LINKPreferences&nbsp;HelpEND_LINK')

  def testSubmitAttribute(self):
    html = tr_html.TrHtml(StringIO('''</td>
<td class="header-element"><input type=submit value="Save Preferences"
name=submit2></td>
</tr></table>'''))
    html.Parse()
    self.assertEqual(html.skeleton_[1].GetMessage().GetPresentableContent(),
                     'Save Preferences')

  def testWhitespaceAfterInlineTag(self):
    '''Test that even if there is whitespace after an inline tag at the start
    of a translateable section the inline tag will be included.
    '''
    html = tr_html.TrHtml(
        StringIO('''<label for=DISPLAYNONE><font size=-1> Hello</font>'''))
    html.Parse()
    self.assertEqual(html.skeleton_[1].GetMessage().GetRealContent(),
                     '<font size=-1> Hello</font>')

  def testSillyHeader(self):
    html = tr_html.TrHtml(StringIO('''[!]
title\tHello
bingo
bongo
bla
<p>Other stuff</p>'''))
    html.Parse()
    content = html.skeleton_[1].GetMessage().GetRealContent()
    self.assertEqual(content, 'Hello')
    self.assertEqual(html.skeleton_[-1], '</p>')
    # Right after the translateable the nontranslateable should start with
    # a linebreak (this catches a bug we had).
    self.assertEqual(html.skeleton_[2][0], '\n')

  def testExplicitDescriptions(self):
    html = tr_html.TrHtml(
        StringIO('Hello [USER]<br/><!-- desc=explicit -->'
                 '<input type="button">Go!</input>'))
    html.Parse()
    msg = html.GetCliques()[1].GetMessage()
    self.assertEqual(msg.GetDescription(), 'explicit')
    self.assertEqual(msg.GetRealContent(), 'Go!')
    html = tr_html.TrHtml(
        StringIO('Hello [USER]<br/><!-- desc=explicit\nmultiline -->'
                 '<input type="button">Go!</input>'))
    html.Parse()
    msg = html.GetCliques()[1].GetMessage()
    self.assertEqual(msg.GetDescription(), 'explicit multiline')
    self.assertEqual(msg.GetRealContent(), 'Go!')

  def testRegressionInToolbarAbout(self):
    html = tr_html.TrHtml(util.PathFromRoot(r'grit/testdata/toolbar_about.html'))
    html.Parse()
    cliques = html.GetCliques()
    for cl in cliques:
      content = cl.GetMessage().GetRealContent()
      if content.count('De parvis grandis acervus erit'):
        self.assertFalse(content.count('$/translate'))

  def HtmlFromFileWithManualCheck(self, f):
    html = tr_html.TrHtml(f)
    html.Parse()
    # For manual results inspection only...  (The accumulator was previously
    # named 'list', which shadowed the builtin.)
    items = []
    for item in html.skeleton_:
      if isinstance(item, six.string_types):
        items.append(item)
      else:
        items.append(item.GetMessage().GetPresentableContent())
    return html

  def testPrivacyHtml(self):
    html = self.HtmlFromFileWithManualCheck(
        util.PathFromRoot(r'grit/testdata/privacy.html'))
    self.assertEqual(html.skeleton_[1].GetMessage().GetRealContent(),
                     'Privacy and Google Desktop Search')
    self.assertTrue(html.skeleton_[3].startswith('<'))
    self.assertTrue(len(html.skeleton_) > 10)

  def testPreferencesHtml(self):
    html = self.HtmlFromFileWithManualCheck(
        util.PathFromRoot(r'grit/testdata/preferences.html'))
    # Verify that we don't get '[STATUS-MESSAGE]' as the original content of
    # one of the MessageClique objects (it would be a placeholder-only message
    # and we're supposed to have stripped those).
    for item in [x for x in html.skeleton_
                 if isinstance(x, clique.MessageClique)]:
      if (item.GetMessage().GetRealContent() == '[STATUS-MESSAGE]' or
          item.GetMessage().GetRealContent() == '[ADDIN-DO] [ADDIN-OPTIONS]'):
        self.fail()
    self.assertTrue(len(html.skeleton_) > 100)

  def AssertNumberOfTranslateables(self, files, num):
    '''Fails if any of the files in files don't have exactly
    num translateable sections.

    Args:
      files: ['file1', 'file2']
      num: 3
    '''
    for f in files:
      f = util.PathFromRoot(r'grit/testdata/%s' % f)
      html = self.HtmlFromFileWithManualCheck(f)
      self.assertEqual(len(html.GetCliques()), num)

  def testFewTranslateables(self):
    self.AssertNumberOfTranslateables(['browser.html', 'email_thread.html',
                                       'header.html', 'mini.html',
                                       'oneclick.html', 'script.html',
                                       'time_related.html', 'versions.html'], 0)
    self.AssertNumberOfTranslateables(['footer.html', 'hover.html'], 1)

  def testOtherHtmlFilesForManualInspection(self):
    files = [
        'about.html', 'bad_browser.html', 'cache_prefix.html',
        'cache_prefix_file.html', 'chat_result.html', 'del_footer.html',
        'del_header.html', 'deleted.html', 'details.html', 'email_result.html',
        'error.html', 'explicit_web.html', 'footer.html',
        'homepage.html', 'indexing_speed.html',
        'install_prefs.html', 'install_prefs2.html',
        'oem_enable.html', 'oem_non_admin.html', 'onebox.html',
        'password.html', 'quit_apps.html', 'recrawl.html',
        'searchbox.html', 'sidebar_h.html', 'sidebar_v.html', 'status.html',
    ]
    for f in files:
      self.HtmlFromFileWithManualCheck(
          util.PathFromRoot(r'grit/testdata/%s' % f))

  def testTranslate(self):
    # Note that the English translation of documents that use character
    # literals (e.g. &copy;) will not be the same as the original document
    # because the character literal will be transformed into the Unicode
    # character itself. So for this test we choose some relatively complex
    # HTML without character entities (but with &nbsp; because that's handled
    # specially).
    html = tr_html.TrHtml(StringIO(''' <script>
<!--
function checkOffice() { var w = document.getElementById("h7");
var e = document.getElementById("h8"); var o = document.getElementById("h10");
if (!(w.checked || e.checked)) { o.checked=0;o.disabled=1;} else {o.disabled=0;} }
// -->
</script>
<input type=checkbox [CHECK-DOC] name=DOC id=h7 onclick='checkOffice()'>
<label for=h7> Word</label><br>
<input type=checkbox [CHECK-XLS] name=XLS id=h8 onclick='checkOffice()'>
<label for=h8> Excel</label><br>
<input type=checkbox [CHECK-PPT] name=PPT id=h9>
<label for=h9> PowerPoint</label><br>
</span></td><td nowrap valign=top><span class="s">
<input type=checkbox [CHECK-PDF] name=PDF id=hpdf>
<label for=hpdf> PDF</label><br>
<input type=checkbox [CHECK-TXT] name=TXT id=h6>
<label for=h6> Text, media, and other files</label><br>
</tr>&nbsp;&nbsp;
<tr><td nowrap valign=top colspan=3><span class="s"><br />
<input type=checkbox [CHECK-SECUREOFFICE] name=SECUREOFFICE id=h10>
<label for=h10> Password-protected Office documents (Word, Excel)</label><br />
<input type=checkbox [DISABLED-HTTPS] [CHECK-HTTPS] name=HTTPS id=h12><label
for=h12> Secure pages (HTTPS) in web history</label></span></td></tr>
</table>'''))
    html.Parse()
    trans = html.Translate('en')
    # Translating to English must round-trip to the identical text.
    if (html.GetText() != trans):
      self.fail()

  def testHtmlToMessageWithBlockTags(self):
    msg = tr_html.HtmlToMessage(
        'Hello<p>Howdie<img alt="bingo" src="image.gif">', True)
    result = msg.GetPresentableContent()
    self.assertEqual(result,
                     'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')
    msg = tr_html.HtmlToMessage(
        'Hello<p>Howdie<input type="button" value="bingo">', True)
    result = msg.GetPresentableContent()
    self.assertEqual(result,
                     'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')

  def testHtmlToMessageRegressions(self):
    msg = tr_html.HtmlToMessage(' - ', True)
    result = msg.GetPresentableContent()
    self.assertEqual(result, ' - ')

  def testEscapeUnescaped(self):
    text = '&copy;&nbsp; & &quot;&lt;hello&gt;&quot;'
    unescaped = util.UnescapeHtml(text)
    self.assertEqual(unescaped, u'\u00a9\u00a0 & "<hello>"')
    escaped_unescaped = util.EscapeHtml(unescaped, True)
    self.assertEqual(escaped_unescaped,
                     u'\u00a9\u00a0 &amp; &quot;&lt;hello&gt;&quot;')

  def testRegressionCjkHtmlFile(self):
    # TODO(joi) Fix this problem where unquoted attributes that
    # have a value that is CJK characters causes the regular expression
    # match never to return. (culprit is the _ELEMENT regexp(
    if False:
      html = self.HtmlFromFileWithManualCheck(util.PathFromRoot(
          r'grit/testdata/ko_oem_enable_bug.html'))
    self.assertTrue(True)

  def testRegressionCpuHang(self):
    # If this regression occurs, the unit test will never return
    html = tr_html.TrHtml(StringIO(
        '''<input type=text size=12 id=advFileTypeEntry [~SHOW-FILETYPE-BOX~] value="[EXT]" name=ext>'''))
    html.Parse()
# Allow running this module directly as a test suite.
if __name__ == '__main__':
  unittest.main()

38
third_party/libwebrtc/tools/grit/grit/gather/txt.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,38 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Supports making a message from a text file.
'''
from __future__ import print_function
from grit.gather import interface
from grit import tclib
class TxtFile(interface.GathererBase):
  '''A text file gatherer. Very simple, all text from the file becomes a
  single clique.
  '''

  def Parse(self):
    '''Loads the whole input file and wraps it in one message clique.'''
    self.text_ = self._LoadInputFile()
    # self.uberclique is provided by the GathererBase base class.
    self.clique_ = self.uberclique.MakeClique(tclib.Message(text=self.text_))

  def GetText(self):
    '''Returns the text of what is being gathered.'''
    return self.text_

  def GetTextualIds(self):
    # The gatherer's external key is the single textual ID for the file.
    return [self.extkey]

  def GetCliques(self):
    '''Returns the MessageClique objects for all translateable portions.'''
    return [self.clique_]

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns the whole file translated into 'lang'.

    skeleton_gatherer is unused: the entire file is one translateable chunk,
    so there is no skeleton to interleave with.
    '''
    return self.clique_.MessageForLanguage(lang,
                                           pseudo_if_not_available,
                                           fallback_to_english).GetRealContent()

35
third_party/libwebrtc/tools/grit/grit/gather/txt_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,35 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for TxtFile gatherer'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import txt
class TxtUnittest(unittest.TestCase):
  """Unit tests for the TxtFile gatherer."""

  def testGather(self):
    # 'stream' was previously named 'input', shadowing the builtin; the
    # failUnless alias (removed in Python 3.12) is replaced by assertEqual.
    stream = StringIO('Hello there\nHow are you?')
    gatherer = txt.TxtFile(stream)
    gatherer.Parse()
    self.assertEqual(gatherer.GetText(), stream.getvalue())
    self.assertEqual(len(gatherer.GetCliques()), 1)
    self.assertEqual(gatherer.GetCliques()[0].GetMessage().GetRealContent(),
                     stream.getvalue())
# Allow running this module directly as a test suite.
if __name__ == '__main__':
  unittest.main()

238
third_party/libwebrtc/tools/grit/grit/grd_reader.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,238 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Class for reading GRD files into memory, without processing them.
'''
from __future__ import print_function
import os.path
import sys
import xml.sax
import xml.sax.handler
import six
from grit import exception
from grit import util
from grit.node import mapping
from grit.node import misc
class StopParsingException(Exception):
  '''An exception used to stop parsing.'''
  # Raised by GrdContentHandler.endElement once the 'stop_after' element has
  # been fully parsed; caught and swallowed in Parse() below.
  pass
class GrdContentHandler(xml.sax.handler.ContentHandler):
  '''SAX content handler that builds a tree of grit.node objects from a .grd
  document, expanding <part> file inclusions and skipping ignored tags.
  '''

  def __init__(self, stop_after, debug, dir, defines, tags_to_ignore,
               target_platform, source):
    # Invariant of data:
    # 'root' is the root of the parse tree being created, or None if we haven't
    # parsed out any elements.
    # 'stack' is a stack of elements that we push new nodes onto and
    # pop from when they finish parsing, or [] if we are not currently parsing.
    # 'stack[-1]' is the top of the stack.
    self.root = None
    self.stack = []
    # Element name after which parsing stops, or None to parse everything.
    self.stop_after = stop_after
    self.debug = debug
    self.dir = dir
    self.defines = defines
    self.tags_to_ignore = tags_to_ignore or set()
    # Nesting depth inside an ignored element; 0 means not ignoring.
    self.ignore_depth = 0
    self.target_platform = target_platform
    # Path of the file currently being parsed (None when given a stream).
    self.source = source

  def startElement(self, name, attrs):
    '''Creates the node for 'name' and pushes it onto the parse stack.'''
    if self.ignore_depth or name in self.tags_to_ignore:
      if self.debug and self.ignore_depth == 0:
        print("Ignoring element %s and its children" % name)
      self.ignore_depth += 1
      return
    if self.debug:
      attr_list = ' '.join('%s="%s"' % kv for kv in attrs.items())
      print("Starting parsing of element %s with attributes %r" %
            (name, attr_list or '(none)'))
    typeattr = attrs.get('type')
    node = mapping.ElementToClass(name, typeattr)()
    node.source = self.source
    if self.stack:
      self.stack[-1].AddChild(node)
      node.StartParsing(name, self.stack[-1])
    else:
      # First element seen: this becomes the tree root.
      assert self.root is None
      self.root = node
      if isinstance(self.root, misc.GritNode):
        if self.target_platform:
          self.root.SetTargetPlatform(self.target_platform)
      node.StartParsing(name, None)
      if self.defines:
        node.SetDefines(self.defines)
    self.stack.append(node)
    for attr, attrval in attrs.items():
      node.HandleAttribute(attr, attrval)

  def endElement(self, name):
    '''Pops the finished node; a closing <part> triggers .grp inclusion.'''
    if self.ignore_depth:
      self.ignore_depth -= 1
      return
    if name == 'part':
      partnode = self.stack[-1]
      partnode.started_inclusion = True
      # Add the contents of the sub-grd file as children of the <part> node.
      partname = os.path.join(self.dir, partnode.GetInputPath())
      # Check the GRDP file exists.
      if not os.path.exists(partname):
        raise exception.FileNotFound(partname)
      # Exceptions propagate to the handler in grd_reader.Parse().
      oldsource = self.source
      try:
        self.source = partname
        xml.sax.parse(partname, GrdPartContentHandler(self))
      finally:
        # Restore even if the nested parse raises.
        self.source = oldsource
    if self.debug:
      print("End parsing of element %s" % name)
    self.stack.pop().EndParsing()
    if name == self.stop_after:
      raise StopParsingException()

  def characters(self, content):
    '''Appends character data to the node currently being parsed.'''
    if self.ignore_depth == 0:
      if self.stack[-1]:
        self.stack[-1].AppendContent(content)

  def ignorableWhitespace(self, whitespace):
    # TODO(joi): This is not supported by expat. Should use a different XML
    # parser?
    pass
class GrdPartContentHandler(xml.sax.handler.ContentHandler):
  '''SAX handler for a <grit-part> (.grp) file.  Validates the root element
  and delegates everything inside it to the parent GrdContentHandler.
  '''

  def __init__(self, parent):
    # The GrdContentHandler driving the main parse.
    self.parent = parent
    # Element nesting depth; 0 only outside/at the root <grit-part> element.
    self.depth = 0

  def startElement(self, name, attrs):
    if self.depth:
      self.parent.startElement(name, attrs)
    else:
      # The root element must be a bare <grit-part> with no attributes.
      if name != 'grit-part':
        raise exception.MissingElement("root tag must be <grit-part>")
      if attrs:
        raise exception.UnexpectedAttribute(
            "<grit-part> tag must not have attributes")
    self.depth += 1

  def endElement(self, name):
    self.depth -= 1
    # Do not forward the closing </grit-part> itself to the parent.
    if self.depth:
      self.parent.endElement(name)

  def characters(self, content):
    self.parent.characters(content)

  def ignorableWhitespace(self, whitespace):
    self.parent.ignorableWhitespace(whitespace)
def Parse(filename_or_stream, dir=None, stop_after=None, first_ids_file=None,
          debug=False, defines=None, tags_to_ignore=None, target_platform=None,
          predetermined_ids_file=None):
  '''Parses a GRD file into a tree of nodes (from grit.node).

  If filename_or_stream is a stream, 'dir' should point to the directory
  notionally containing the stream (this feature is only used in unit tests).

  If 'stop_after' is provided, the parsing will stop once the first node
  with this name has been fully parsed (including all its contents).

  If 'debug' is true, lots of information about the parsing events will be
  printed out during parsing of the file.

  If 'first_ids_file' is non-empty, it is used to override the setting for the
  first_ids_file attribute of the <grit> root node. Note that the first_ids_file
  parameter should be relative to the cwd, even though the first_ids_file
  attribute of the <grit> node is relative to the grd file.

  If 'target_platform' is set, this is used to determine the target
  platform of builds, instead of using |sys.platform|.

  Args:
    filename_or_stream: './bla.xml'
    dir: None (if filename_or_stream is a filename) or '.'
    stop_after: 'inputs'
    first_ids_file: 'GRIT_DIR/../gritsettings/resource_ids'
    debug: False
    defines: dictionary of defines, like {'chromeos': '1'}
    target_platform: None or the value that would be returned by sys.platform
        on your target platform.
    predetermined_ids_file: File path to a file containing a pre-determined
        mapping from resource names to resource ids which will be used to assign
        resource ids to those resources.

  Return:
    Subclass of grit.node.base.Node

  Throws:
    grit.exception.Parsing
  '''
  if isinstance(filename_or_stream, six.string_types):
    source = filename_or_stream
    if dir is None:
      dir = util.dirname(filename_or_stream)
  else:
    source = None
  handler = GrdContentHandler(stop_after=stop_after, debug=debug, dir=dir,
                              defines=defines, tags_to_ignore=tags_to_ignore,
                              target_platform=target_platform, source=source)
  try:
    xml.sax.parse(filename_or_stream, handler)
  except StopParsingException:
    # Expected control-flow exception: we were asked to stop early.
    assert stop_after
    pass
  except:
    if not debug:
      print("parse exception: run GRIT with the -x flag to debug .grd problems")
    raise
  if handler.root.name != 'grit':
    raise exception.MissingElement("root tag must be <grit>")
  if hasattr(handler.root, 'SetOwnDir'):
    # Fix up the base_dir so it is relative to the input file.
    assert dir is not None
    handler.root.SetOwnDir(dir)
  if isinstance(handler.root, misc.GritNode):
    handler.root.SetPredeterminedIdsFile(predetermined_ids_file)
    if first_ids_file:
      # Make the path to the first_ids_file relative to the grd file,
      # unless it begins with GRIT_DIR.
      GRIT_DIR_PREFIX = 'GRIT_DIR'
      # Slicing (rather than indexing) avoids an IndexError when the value is
      # exactly 'GRIT_DIR' with no path separator after the prefix.
      if not (first_ids_file.startswith(GRIT_DIR_PREFIX)
              and first_ids_file[len(GRIT_DIR_PREFIX):len(GRIT_DIR_PREFIX) + 1]
                  in ('/', '\\')):
        rel_dir = os.path.relpath(os.getcwd(), dir)
        first_ids_file = util.normpath(os.path.join(rel_dir, first_ids_file))
      handler.root.attrs['first_ids_file'] = first_ids_file
    # Assign first ids to the nodes that don't have them.
    handler.root.AssignFirstIds(filename_or_stream, defines)
  return handler.root
# Command-line usage: print the parsed tree of the .grd file given in argv[1].
if __name__ == '__main__':
  util.ChangeStdoutEncoding()
  print(six.text_type(Parse(sys.argv[1])))

346
third_party/libwebrtc/tools/grit/grit/grd_reader_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,346 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grd_reader package'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import six
from six import StringIO
from grit import exception
from grit import grd_reader
from grit import util
from grit.node import empty
from grit.node import message
class GrdReaderUnittest(unittest.TestCase):
def testParsingAndXmlOutput(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit base_dir="." current_release="3" latest_public_release="2" source_lang_id="en-US">
<release seq="3">
<includes>
<include file="images/logo.gif" name="ID_LOGO" type="gif" />
</includes>
<messages>
<if expr="True">
<message desc="Printed to greet the currently logged in user" name="IDS_GREETING">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
</if>
</messages>
<structures>
<structure file="rc_files/dialogs.rc" name="IDD_NARROW_DIALOG" type="dialog">
<skeleton expr="lang == 'fr-FR'" file="bla.rc" variant_of_revision="3" />
</structure>
<structure file="rc_files/version.rc" name="VS_VERSION_INFO" type="version" />
</structures>
</release>
<translations>
<file lang="nl" path="nl_translations.xtb" />
</translations>
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="resource.rc" lang="en-US" type="rc_all" />
</outputs>
</grit>'''
pseudo_file = StringIO(input)
tree = grd_reader.Parse(pseudo_file, '.')
output = six.text_type(tree)
expected_output = input.replace(u' base_dir="."', u'')
self.assertEqual(expected_output, output)
self.failUnless(tree.GetNodeById('IDS_GREETING'))
def testStopAfter(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="resource.rc" lang="en-US" type="rc_all" />
</outputs>
<release seq="3">
<includes>
<include type="gif" name="ID_LOGO" file="images/logo.gif"/>
</includes>
</release>
</grit>'''
pseudo_file = StringIO(input)
tree = grd_reader.Parse(pseudo_file, '.', stop_after='outputs')
# only an <outputs> child
self.failUnless(len(tree.children) == 1)
self.failUnless(tree.children[0].name == 'outputs')
def testLongLinesWithComments(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
This is a very long line with no linebreaks yes yes it stretches on <!--
-->and on <!--
-->and on!
</message>
</messages>
</release>
</grit>'''
pseudo_file = StringIO(input)
tree = grd_reader.Parse(pseudo_file, '.')
greeting = tree.GetNodeById('IDS_GREETING')
self.failUnless(greeting.GetCliques()[0].GetMessage().GetRealContent() ==
'This is a very long line with no linebreaks yes yes it '
'stretches on and on and on!')
def doTestAssignFirstIds(self, first_ids_path):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir="." first_ids_file="%s">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
</messages>
</release>
</grit>''' % first_ids_path
pseudo_file = StringIO(input)
grit_root_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..')
fake_input_path = os.path.join(
grit_root_dir, "grit/testdata/chrome/app/generated_resources.grd")
root = grd_reader.Parse(pseudo_file, os.path.split(fake_input_path)[0])
root.AssignFirstIds(fake_input_path, {})
messages_node = root.children[0].children[0]
self.failUnless(isinstance(messages_node, empty.MessagesNode))
self.failUnless(messages_node.attrs["first_id"] !=
empty.MessagesNode().DefaultAttributes()["first_id"])
  def testAssignFirstIds(self):
    # first_ids_file given as a path relative to the grd file's directory.
    self.doTestAssignFirstIds("../../tools/grit/resource_ids")
  def testAssignFirstIdsUseGritDir(self):
    # first_ids_file anchored at the GRIT_DIR placeholder prefix.
    self.doTestAssignFirstIds("GRIT_DIR/grit/testdata/tools/grit/resource_ids")
def testAssignFirstIdsMultipleMessages(self):
"""If there are multiple messages sections, the resource_ids file
needs to list multiple first_id values."""
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir="." first_ids_file="resource_ids">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
</messages>
<messages>
<message name="IDS_TEST2" desc="test">
test2
</message>
</messages>
</release>
</grit>'''
pseudo_file = StringIO(input)
grit_root_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..')
fake_input_path = os.path.join(grit_root_dir, "grit/testdata/test.grd")
root = grd_reader.Parse(pseudo_file, os.path.split(fake_input_path)[0])
root.AssignFirstIds(fake_input_path, {})
messages_node = root.children[0].children[0]
self.assertTrue(isinstance(messages_node, empty.MessagesNode))
self.assertEqual('100', messages_node.attrs["first_id"])
messages_node = root.children[0].children[1]
self.assertTrue(isinstance(messages_node, empty.MessagesNode))
self.assertEqual('10000', messages_node.attrs["first_id"])
def testUseNameForIdAndPpIfdef(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="pp_ifdef('hello')">
<message name="IDS_HELLO" use_name_for_id="true">
Hello!
</message>
</if>
</messages>
</release>
</grit>'''
pseudo_file = StringIO(input)
root = grd_reader.Parse(pseudo_file, '.', defines={'hello': '1'})
# Check if the ID is set to the name. In the past, there was a bug
# that caused the ID to be a generated number.
hello = root.GetNodeById('IDS_HELLO')
self.failUnless(hello.GetCliques()[0].GetId() == 'IDS_HELLO')
def testUseNameForIdWithIfElse(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="pp_ifdef('hello')">
<then>
<message name="IDS_HELLO" use_name_for_id="true">
Hello!
</message>
</then>
<else>
<message name="IDS_HELLO" use_name_for_id="true">
Yellow!
</message>
</else>
</if>
</messages>
</release>
</grit>'''
pseudo_file = StringIO(input)
root = grd_reader.Parse(pseudo_file, '.', defines={'hello': '1'})
# Check if the ID is set to the name. In the past, there was a bug
# that caused the ID to be a generated number.
hello = root.GetNodeById('IDS_HELLO')
self.failUnless(hello.GetCliques()[0].GetId() == 'IDS_HELLO')
def testPartInclusionAndCorrectSource(self):
  # Verifies that <part> files (both relative to the base dir and given by
  # absolute path) are inlined into the tree, that each message records the
  # file it was parsed from in .source, and that FormatXml round-trips the
  # expanded tree.
  arbitrary_path_grd = u'''\
<grit-part>
<message name="IDS_TEST5" desc="test5">test5</message>
</grit-part>'''
  tmp_dir = util.TempDir({'arbitrary_path.grp': arbitrary_path_grd})
  arbitrary_path_grd_file = tmp_dir.GetPath('arbitrary_path.grp')
  # The top-level file pulls in one relative <part> and one absolute one.
  top_grd = u'''\
<grit latest_public_release="2" current_release="3">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
<part file="sub.grp" />
<part file="%s" />
</messages>
</release>
</grit>''' % arbitrary_path_grd_file
  # sub.grp itself includes a nested part, exercising recursive inclusion.
  sub_grd = u'''\
<grit-part>
<message name="IDS_TEST2" desc="test2">test2</message>
<part file="subsub.grp" />
<message name="IDS_TEST3" desc="test3">test3</message>
</grit-part>'''
  subsub_grd = u'''\
<grit-part>
<message name="IDS_TEST4" desc="test4">test4</message>
</grit-part>'''
  expected_output = u'''\
<grit current_release="3" latest_public_release="2">
<release seq="3">
<messages>
<message desc="test" name="IDS_TEST">
test
</message>
<part file="sub.grp">
<message desc="test2" name="IDS_TEST2">
test2
</message>
<part file="subsub.grp">
<message desc="test4" name="IDS_TEST4">
test4
</message>
</part>
<message desc="test3" name="IDS_TEST3">
test3
</message>
</part>
<part file="%s">
<message desc="test5" name="IDS_TEST5">
test5
</message>
</part>
</messages>
</release>
</grit>''' % arbitrary_path_grd_file
  with util.TempDir({'sub.grp': sub_grd,
                     'subsub.grp': subsub_grd}) as tmp_sub_dir:
    output = grd_reader.Parse(StringIO(top_grd),
                              tmp_sub_dir.GetPath())
    # Expected .source per message; None marks the top-level input itself.
    correct_sources = {
      'IDS_TEST': None,
      'IDS_TEST2': tmp_sub_dir.GetPath('sub.grp'),
      'IDS_TEST3': tmp_sub_dir.GetPath('sub.grp'),
      'IDS_TEST4': tmp_sub_dir.GetPath('subsub.grp'),
      'IDS_TEST5': arbitrary_path_grd_file,
    }
    for node in output.ActiveDescendants():
      with node:
        if isinstance(node, message.MessageNode):
          self.assertEqual(correct_sources[node.attrs.get('name')], node.source)
  # Compare token streams so whitespace differences do not matter.
  self.assertEqual(expected_output.split(), output.FormatXml().split())
  tmp_dir.CleanUp()
def testPartInclusionFailure(self):
  '''Malformed <part> / <grit-part> content raises the expected errors.'''
  template = u'''
<grit latest_public_release="2" current_release="3">
<outputs>
%s
</outputs>
</grit>'''
  # Failures detectable directly from the top-level document.
  inline_cases = [
      (exception.UnexpectedContent, u'<part file="x">fnord</part>'),
      (exception.UnexpectedChild,
       u'<part file="x"><output filename="x" type="y" /></part>'),
      (exception.FileNotFound, u'<part file="yet_created_x" />'),
  ]
  for expected_error, snippet in inline_cases:
    grd_stream = StringIO(template % snippet)
    self.assertRaises(expected_error, grd_reader.Parse, grd_stream, '.')
  # Failures that only surface once the referenced part file is read.
  part_file_cases = [
      (exception.UnexpectedAttribute, u'<grit-part file="xyz"></grit-part>'),
      (exception.MissingElement, u'<output filename="x" type="y" />'),
  ]
  for expected_error, part_content in part_file_cases:
    top_grd = StringIO(template % u'<part file="bad.grp" />')
    with util.TempDir({'bad.grp': part_content}) as temp_dir:
      self.assertRaises(expected_error, grd_reader.Parse, top_grd,
                        temp_dir.GetPath())
def testEarlyEnoughPlatformSpecification(self):
  # This is a regression test for issue
  # https://code.google.com/p/grit-i18n/issues/detail?id=23
  # The second <if> matches the *host* platform (sys.platform); parsing with
  # target_platform='android' must disable it early enough that the two
  # IDS_XYZ definitions are never simultaneously active.
  grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" current_release="1">
<release seq="1">
<messages>
<if expr="not pp_ifdef('use_titlecase')">
<message name="IDS_XYZ">foo</message>
</if>
<!-- The assumption is that use_titlecase is never true for
this platform. When the platform isn't set to 'android'
early enough, we get a duplicate message name. -->
<if expr="os == '%s'">
<message name="IDS_XYZ">boo</message>
</if>
</messages>
</release>
</grit>''' % sys.platform
  # Success is simply that Parse does not raise a duplicate-name error.
  with util.TempDir({}) as temp_dir:
    grd_reader.Parse(StringIO(grd_text), temp_dir.GetPath(),
                     target_platform='android')
# Allow running this test suite directly from the command line.
if __name__ == '__main__':
  unittest.main()

62
third_party/libwebrtc/tools/grit/grit/grit-todo.xml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,62 @@
<?xml version="1.0" encoding="windows-1252"?>
<TODOLIST FILEFORMAT="6" PROJECTNAME="GRIT" NEXTUNIQUEID="56" FILEVERSION="69" LASTMODIFIED="2005-08-19">
<TASK STARTDATESTRING="2005-04-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38453.49975694" TITLE="check 'name' attribute is unique" TIMEESTUNITS="H" ID="2" PERCENTDONE="100" STARTDATE="38450.00000000" DONEDATESTRING="2005-04-11" POS="22" DONEDATE="38453.00000000"/>
<TASK STARTDATESTRING="2005-04-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38488.48189815" TITLE="import id-calculating code" TIMEESTUNITS="H" ID="3" PERCENTDONE="100" STARTDATE="38450.00000000" DONEDATESTRING="2005-05-16" POS="13" DONEDATE="38488.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38488.48209491" TITLE="Import tool for existing translations" TIMEESTUNITS="H" ID="6" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-16" POS="12" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00805556" TITLE="Export XMBs" TIMEESTUNITS="H" ID="8" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-08" POS="20" DONEDATE="38511.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00924769" TITLE="Initial Integration" TIMEESTUNITS="H" ID="10" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-08" POS="10" DONEDATE="38511.00000000">
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38496.54048611" TITLE="parser for %s strings" TIMEESTUNITS="H" ID="4" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-05-24" POS="2" DONEDATE="38496.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38497.00261574" TITLE="import tool for existing RC files" TIMEESTUNITS="H" ID="5" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-05-25" POS="4" DONEDATE="38497.00000000">
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38496.92990741" TITLE="handle button value= and img alt= in message HTML text" TIMEESTUNITS="H" ID="22" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-05-24" POS="1" DONEDATE="38496.00000000"/>
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38497.00258102" TITLE="&amp;nbsp; bug" TIMEESTUNITS="H" ID="23" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-05-25" POS="2" DONEDATE="38497.00000000"/>
</TASK>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38490.61171296" TITLE="grit build" TIMEESTUNITS="H" ID="7" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-05-18" POS="6" DONEDATE="38490.00000000">
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38490.61168981" TITLE="use IDs gathered from gatherers for .h file" TIMEESTUNITS="H" ID="20" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-05-18" POS="1" DONEDATE="38490.00000000"/>
</TASK>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38504.55199074" TITLE="SCons Integration" TIMEESTUNITS="H" ID="9" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-01" POS="1" DONEDATE="38504.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38490.61181713" TITLE="handle includes" TIMEESTUNITS="H" ID="12" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-05-18" POS="5" DONEDATE="38490.00000000"/>
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38507.98567130" TITLE="output translated HTML templates" TIMEESTUNITS="H" ID="25" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-06-04" POS="3" DONEDATE="38507.00000000"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38507.99394676" TITLE="bug: re-escape too much in RC dialogs etc." TIMEESTUNITS="H" ID="38" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-04" POS="7" DONEDATE="38507.00000000"/>
</TASK>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46444444" TITLE="handle structure variants" TIMEESTUNITS="H" ID="11" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-16" POS="15" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46456019" TITLE="handle include variants" TIMEESTUNITS="H" ID="13" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-16" POS="17" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46537037" TITLE="handle translateable text for includes (e.g. image text)" TIMEESTUNITS="H" ID="14" PERCENTDONE="100" STARTDATE="38488.00000000" DONEDATESTRING="2005-06-16" POS="14" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46712963" TITLE="ddoc" TIMEESTUNITS="H" ID="15" STARTDATE="38488.00000000" POS="4">
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46718750" TITLE="review comments miket" TIMEESTUNITS="H" ID="16" STARTDATE="38488.00000000" POS="2"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46722222" TITLE="review comments pdoyle" TIMEESTUNITS="H" ID="17" STARTDATE="38488.00000000" POS="1"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.46732639" TITLE="remove 'extkey' from structure" TIMEESTUNITS="H" ID="18" STARTDATE="38488.00000000" POS="3"/>
<TASK STARTDATESTRING="2005-05-16" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38489.53537037" TITLE="add 'encoding' to structure" TIMEESTUNITS="H" ID="19" STARTDATE="38488.00000000" POS="6"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38503.55304398" TITLE="document limitation: emitter doesn't emit the translated HTML templates" TIMEESTUNITS="H" ID="30" STARTDATE="38503.00000000" POS="4"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38504.58541667" TITLE="add 'internal_comment' to &lt;message&gt;" TIMEESTUNITS="H" ID="32" STARTDATE="38503.00000000" POS="5"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38505.73391204" TITLE="&lt;outputs&gt; can not have paths (because of SCons integration - goes to build dir)" TIMEESTUNITS="H" ID="36" STARTDATE="38503.00000000" POS="9"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38506.64265046" TITLE="&lt;identifers&gt; and &lt;identifier&gt; nodes" TIMEESTUNITS="H" ID="37" STARTDATE="38503.00000000" POS="10"/>
<TASK STARTDATESTRING="2005-06-23" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38526.62344907" TITLE="&lt;structure&gt; can have 'exclude_from_rc' attribute (default false)" TIMEESTUNITS="H" ID="47" STARTDATE="38526.00000000" POS="8"/>
<TASK STARTDATESTRING="2005-06-23" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38531.94135417" TITLE="add 'enc_check' to &lt;grit&gt;" TIMEESTUNITS="H" ID="48" STARTDATE="38526.00000000" POS="7"/>
</TASK>
<TASK STARTDATESTRING="2005-05-18" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38492.51549769" TITLE="handle nontranslateable messages (in MessageClique?)" TIMEESTUNITS="H" ID="21" PERCENTDONE="100" STARTDATE="38490.00000000" DONEDATESTRING="2005-06-16" POS="16" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38505.70454861" TITLE="ask cprince about SCons builder in new mk system" TIMEESTUNITS="H" ID="24" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-06-02" POS="25" DONEDATE="38505.00000000"/>
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38504.57436343" TITLE="fix AOL resource in trunk (&quot;???????&quot;)" TIMEESTUNITS="H" ID="26" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-06-01" POS="19" DONEDATE="38504.00000000"/>
<TASK STARTDATESTRING="2005-05-24" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38498.53893519" TITLE="rc_all vs. rc_translateable vs. rc_nontranslateable" TIMEESTUNITS="H" ID="27" PERCENTDONE="100" STARTDATE="38496.00000000" DONEDATESTRING="2005-06-16" POS="6" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38509.45532407" TITLE="make separate .grb &quot;outputs&quot; file (and change SCons integ) (??)" TIMEESTUNITS="H" ID="28" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-06" POS="8" DONEDATE="38509.00000000"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00939815" TITLE="fix unit tests so they run from any directory" TIMEESTUNITS="H" ID="33" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-08" POS="18" DONEDATE="38511.00000000"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38508.96640046" TITLE="Change R4 tool to CC correct team(s) on GRIT changes" TIMEESTUNITS="H" ID="39" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-05" POS="23" DONEDATE="38508.00000000"/>
<TASK STARTDATESTRING="2005-06-07" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00881944" TITLE="Document why wrapper.rc" TIMEESTUNITS="H" ID="40" PERCENTDONE="100" STARTDATE="38510.00000000" DONEDATESTRING="2005-06-08" POS="21" DONEDATE="38511.00000000"/>
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00804398" TITLE="import XTBs" TIMEESTUNITS="H" ID="41" PERCENTDONE="100" STARTDATE="38511.00000000" DONEDATESTRING="2005-06-16" POS="11" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00875000" TITLE="Nightly build integration" TIMEESTUNITS="H" ID="42" STARTDATE="38511.00000000" POS="3"/>
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00891204" TITLE="BUGS" TIMEESTUNITS="H" ID="43" STARTDATE="38511.00000000" POS="24">
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38513.03375000" TITLE="Should report error if RC-section structure refers to does not exist" TIMEESTUNITS="H" ID="44" PERCENTDONE="100" STARTDATE="38511.00000000" DONEDATESTRING="2005-06-10" POS="1" DONEDATE="38513.00000000"/>
</TASK>
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.00981481" TITLE="NEW FEATURES" TIMEESTUNITS="H" ID="45" PERCENTDONE="100" STARTDATE="38511.00000000" DONEDATESTRING="2005-06-16" POS="7" DONEDATE="38519.00000000">
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38505.70077546" TITLE="Implement line-continuation feature (\ at end of line?)" TIMEESTUNITS="H" ID="34" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-16" POS="1" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-05-31" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38505.70262731" TITLE="Implement conditional inclusion &amp; reflect the conditionals from R3 RC file" TIMEESTUNITS="H" ID="35" PERCENTDONE="100" STARTDATE="38503.00000000" DONEDATESTRING="2005-06-16" POS="2" DONEDATE="38519.00000000"/>
</TASK>
<TASK STARTDATESTRING="2005-06-08" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38511.01046296" TITLE="TC integration (one-way TO the TC)" TIMEESTUNITS="H" ID="46" PERCENTDONE="100" STARTDATE="38511.00000000" DONEDATESTRING="2005-06-16" POS="5" DONEDATE="38519.00000000"/>
<TASK STARTDATESTRING="2005-06-30" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38533.59072917" TITLE="bazaar20 ad for GRIT help" TIMEESTUNITS="H" ID="49" STARTDATE="38533.00000000" POS="2">
<TASK STARTDATESTRING="2005-08-19" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38583.72346065" TITLE="bazaar20 ideas" TIMEESTUNITS="H" ID="51" STARTDATE="38583.00000000" POS="1">
<TASK STARTDATESTRING="2005-08-19" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38583.72354167" TITLE="GUI for adding/editing messages" TIMEESTUNITS="H" ID="52" STARTDATE="38583.00000000" POS="2"/>
<TASK STARTDATESTRING="2005-08-19" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38583.72365741" TITLE="XLIFF import/export" TIMEESTUNITS="H" ID="54" STARTDATE="38583.00000000" POS="1"/>
</TASK>
</TASK>
<TASK STARTDATESTRING="2005-06-30" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38583.73721065" TITLE="internal_comment for all resource nodes (not just &lt;message&gt;)" TIMEESTUNITS="H" ID="50" PERCENTDONE="100" STARTDATE="38533.00000000" DONEDATESTRING="2005-08-19" POS="9" DONEDATE="38583.73721065"/>
<TASK STARTDATESTRING="2005-08-19" PRIORITY="5" TIMEESPENTUNITS="H" LASTMOD="38583.73743056" TITLE="Preserve XML comments - this gives us line continuation and more" TIMEESTUNITS="H" ID="55" STARTDATE="38583.72326389" POS="1"/>
</TODOLIST>

334
third_party/libwebrtc/tools/grit/grit/grit_runner.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,334 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command processor for GRIT. This is the script you invoke to run the various
GRIT tools.
"""
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import getopt
from grit import util
import grit.extern.FP
# Tool info factories; these import only within each factory to avoid
# importing most of the GRIT code until required.

def ToolFactoryBuild():
  '''Returns the 'build' tool (grit.tool.build.RcBuilder).'''
  import grit.tool.build
  return grit.tool.build.RcBuilder()

def ToolFactoryBuildInfo():
  '''Returns the 'buildinfo' tool (grit.tool.buildinfo.DetermineBuildInfo).'''
  import grit.tool.buildinfo
  return grit.tool.buildinfo.DetermineBuildInfo()

def ToolFactoryCount():
  '''Returns the 'count' tool (grit.tool.count.CountMessage).'''
  import grit.tool.count
  return grit.tool.count.CountMessage()

def ToolFactoryDiffStructures():
  '''Returns the 'sdiff' tool (grit.tool.diff_structures.DiffStructures).'''
  import grit.tool.diff_structures
  return grit.tool.diff_structures.DiffStructures()

def ToolFactoryMenuTranslationsFromParts():
  '''Returns the 'menufromparts' tool (MenuTranslationsFromParts).'''
  import grit.tool.menu_from_parts
  return grit.tool.menu_from_parts.MenuTranslationsFromParts()

def ToolFactoryNewGrd():
  '''Returns the 'newgrd' tool (grit.tool.newgrd.NewGrd).'''
  import grit.tool.newgrd
  return grit.tool.newgrd.NewGrd()

def ToolFactoryResizeDialog():
  '''Returns the 'resize' tool (grit.tool.resize.ResizeDialog).'''
  import grit.tool.resize
  return grit.tool.resize.ResizeDialog()

def ToolFactoryRc2Grd():
  '''Returns the 'rc2grd' tool (grit.tool.rc2grd.Rc2Grd).'''
  import grit.tool.rc2grd
  return grit.tool.rc2grd.Rc2Grd()

def ToolFactoryTest():
  '''Returns the hidden 'test' tool (grit.tool.test.TestTool).'''
  import grit.tool.test
  return grit.tool.test.TestTool()

def ToolFactoryTranslationToTc():
  '''Returns the 'transl2tc' tool (grit.tool.transl2tc.TranslationToTc).'''
  import grit.tool.transl2tc
  return grit.tool.transl2tc.TranslationToTc()

def ToolFactoryUnit():
  '''Returns the 'unit' tool (grit.tool.unit.UnitTestTool).'''
  import grit.tool.unit
  return grit.tool.unit.UnitTestTool()

def ToolFactoryUpdateResourceIds():
  '''Returns the 'update_resource_ids' tool (UpdateResourceIds).'''
  import grit.tool.update_resource_ids
  return grit.tool.update_resource_ids.UpdateResourceIds()

def ToolFactoryXmb():
  '''Returns the 'xmb' tool (grit.tool.xmb.OutputXmb).'''
  import grit.tool.xmb
  return grit.tool.xmb.OutputXmb()

def ToolAndroid2Grd():
  '''Returns the 'android2grd' tool (grit.tool.android2grd.Android2Grd).'''
  import grit.tool.android2grd
  return grit.tool.android2grd.Android2Grd()
# Keys for the following map
_FACTORY = 1         # value: zero-argument callable returning the tool object
_REQUIRES_INPUT = 2  # value: bool; True if the tool needs the -i input file
_HIDDEN = 3  # optional key - presence indicates tool is hidden

# Maps tool names to the tool's module. Done as a list of (key, value) tuples
# instead of a map to preserve ordering.
_TOOLS = [
    ['android2grd', {_FACTORY: ToolAndroid2Grd, _REQUIRES_INPUT: False}],
    ['build', {_FACTORY: ToolFactoryBuild, _REQUIRES_INPUT: True}],
    ['buildinfo', {_FACTORY: ToolFactoryBuildInfo, _REQUIRES_INPUT: True}],
    ['count', {_FACTORY: ToolFactoryCount, _REQUIRES_INPUT: True}],
    ['menufromparts', {
        _FACTORY: ToolFactoryMenuTranslationsFromParts,
        _REQUIRES_INPUT: True,
        _HIDDEN: True
    }],
    ['newgrd', {_FACTORY: ToolFactoryNewGrd, _REQUIRES_INPUT: False}],
    ['rc2grd', {_FACTORY: ToolFactoryRc2Grd, _REQUIRES_INPUT: False}],
    ['resize', {_FACTORY: ToolFactoryResizeDialog, _REQUIRES_INPUT: True}],
    ['sdiff', {_FACTORY: ToolFactoryDiffStructures, _REQUIRES_INPUT: False}],
    ['test', {
        _FACTORY: ToolFactoryTest,
        _REQUIRES_INPUT: True,
        _HIDDEN: True
    }],
    ['transl2tc', {
        _FACTORY: ToolFactoryTranslationToTc,
        _REQUIRES_INPUT: False
    }],
    ['unit', {_FACTORY: ToolFactoryUnit, _REQUIRES_INPUT: False}],
    ['update_resource_ids', {
        _FACTORY: ToolFactoryUpdateResourceIds,
        _REQUIRES_INPUT: False
    }],
    ['xmb', {_FACTORY: ToolFactoryXmb, _REQUIRES_INPUT: True}],
]
def PrintUsage():
  '''Prints command-line usage for grit: the global options and the list of
  all non-hidden tools with their short descriptions.'''
  # Build the listing with a single join instead of quadratic '+=' string
  # concatenation; tools carrying the _HIDDEN key are skipped.
  tool_list = ''.join(
      ' %-12s %s\n' % (name, info[_FACTORY]().ShortDescription())
      for name, info in _TOOLS if _HIDDEN not in info)

  print("""GRIT - the Google Resource and Internationalization Tool

Usage: grit [GLOBALOPTIONS] TOOL [args to tool]

Global options:

  -i INPUT  Specifies the INPUT file to use (a .grd file). If this is not
            specified, GRIT will look for the environment variable GRIT_INPUT.
            If it is not present either, GRIT will try to find an input file
            named 'resource.grd' in the current working directory.

  -h MODULE Causes GRIT to use MODULE.UnsignedFingerPrint instead of
            grit.extern.FP.UnsignedFingerprint. MODULE must be
            available somewhere in the PYTHONPATH search path.

  -v        Print more verbose runtime information.

  -x        Print extremely verbose runtime information. Implies -v

  -p FNAME  Specifies that GRIT should profile its execution and output the
            results to the file FNAME.

Tools:

  TOOL can be one of the following:
%s
  For more information on how to use a particular tool, and the specific
  arguments you can send to that tool, execute 'grit help TOOL'
""" % (tool_list))
class Options(object):
  """Option storage and parsing."""

  def __init__(self):
    self.hash = None                 # module name for -h, or None
    self.input = None                # input .grd path (-i / GRIT_INPUT)
    self.verbose = False             # -v
    self.extra_verbose = False       # -x (implies -v)
    self.output_stream = sys.stdout
    self.profile_dest = None         # profiler output path (-p), or None

  def ReadOptions(self, args):
    """Reads options from the start of args and returns the remainder."""
    opts, remainder = getopt.getopt(args, 'vxi:p:h:', ('help',))
    for flag, value in opts:
      if flag == '-h':
        self.hash = value
      elif flag == '-i':
        self.input = value
      elif flag == '-v':
        self.verbose = True
        util.verbose = True
      elif flag == '-x':
        # -x implies -v.
        self.verbose = True
        util.verbose = True
        self.extra_verbose = True
        util.extra_verbose = True
      elif flag == '-p':
        self.profile_dest = value
      elif flag == '--help':
        PrintUsage()
        sys.exit(0)
    # Fall back to the environment, then to the conventional default name.
    if not self.input:
      self.input = os.environ.get('GRIT_INPUT', 'resource.grd')
    return remainder

  def __repr__(self):
    return '(verbose: %d, input: %s)' % (self.verbose, self.input)
def _GetToolInfo(tool):
  '''Returns the info map for the tool named 'tool' or None if there is no
  such tool.'''
  for name, info in _TOOLS:
    if name == tool:
      return info
  return None
def Main(args=None):
  """Parses arguments and runs the selected GRIT tool.

  Args:
    args: argument list, or None to use sys.argv[1:] (supports setuptools
        console wrappers).

  Returns:
    0 on success, 1 for option errors, 2 for usage errors (no/unknown tool,
    missing input file).
  """
  util.ChangeStdoutEncoding()

  # Support for setuptools console wrappers.
  if args is None:
    args = sys.argv[1:]

  options = Options()
  try:
    args = options.ReadOptions(args)  # args may be shorter after this
  except getopt.GetoptError as e:
    print("grit:", str(e))
    print("Try running 'grit help' for valid options.")
    return 1
  if not args:
    print("No tool provided. Try running 'grit help' for a list of tools.")
    return 2

  tool = args[0]
  if tool == 'help':
    if len(args) == 1:
      PrintUsage()
      return 0
    tool = args[1]
    # Look the tool up once and reuse the result.
    tool_info = _GetToolInfo(tool)
    if not tool_info:
      print("No such tool. Try running 'grit help' for a list of tools.")
      return 2
    print("Help for 'grit %s' (for general help, run 'grit help'):\n" %
          (tool,))
    tool_info[_FACTORY]().ShowUsage()
    return 0

  tool_info = _GetToolInfo(tool)
  if not tool_info:
    print("No such tool. Try running 'grit help' for a list of tools.")
    return 2

  try:
    if tool_info[_REQUIRES_INPUT]:
      os.stat(options.input)
  except OSError:
    print('Input file %s not found.\n'
          'To specify a different input file:\n'
          '  1. Use the GRIT_INPUT environment variable.\n'
          '  2. Use the -i command-line option. This overrides '
          'GRIT_INPUT.\n'
          '  3. Specify neither GRIT_INPUT or -i and GRIT will try to load '
          "'resource.grd'\n"
          '     from the current directory.' % options.input)
    return 2

  if options.hash:
    grit.extern.FP.UseUnsignedFingerPrintFromModule(options.hash)

  try:
    toolobject = tool_info[_FACTORY]()
    if options.profile_dest:
      # NOTE(review): hotshot only exists on Python 2; this profiling path
      # breaks under Python 3 — consider cProfile if py3 profiling is needed.
      import hotshot
      prof = hotshot.Profile(options.profile_dest)
      return prof.runcall(toolobject.Run, options, args[1:])
    else:
      return toolobject.Run(options, args[1:])
  except getopt.GetoptError as e:
    print("grit: %s: %s" % (tool, str(e)))
    print("Try running 'grit help %s' for valid options." % (tool,))
    return 1
if __name__ == '__main__':
  # Make the sibling 'diagnosis' directory importable so the optional
  # crbug_1001171 helper can wrap the run when it is present.
  sys.path.append(
      os.path.join(
          os.path.dirname(
              os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
          'diagnosis'))
  try:
    import crbug_1001171
    with crbug_1001171.DumpStateOnLookupError():
      sys.exit(Main(sys.argv[1:]))
  except ImportError:
    # The diagnosis helper is optional; fall through to a plain run.
    pass

  sys.exit(Main(sys.argv[1:]))

42
third_party/libwebrtc/tools/grit/grit/grit_runner_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,42 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.py'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from six import StringIO
from grit import util
import grit.grit_runner
class OptionArgsUnittest(unittest.TestCase):
  """Tests option handling in grit.grit_runner.Main."""

  def setUp(self):
    # Capture stdout so the test can inspect what the tool printed.
    self.buf = StringIO()
    self.old_stdout = sys.stdout
    sys.stdout = self.buf

  def tearDown(self):
    sys.stdout = self.old_stdout

  def testSimple(self):
    grit.grit_runner.Main(['-i',
                           util.PathFromRoot('grit/testdata/simple-input.xml'),
                           'test', 'bla', 'voff', 'ga'])
    output = self.buf.getvalue()
    # assertEqual/assertTrue instead of the deprecated failUnless alias
    # (removed in Python 3.12).
    self.assertEqual(0, output.count("'test'"))  # tool name doesn't occur
    self.assertTrue(output.count('bla'))
    self.assertTrue(output.count('simple-input.xml'))


if __name__ == '__main__':
  unittest.main()

46
third_party/libwebrtc/tools/grit/grit/lazy_re.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,46 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''In GRIT, we used to compile a lot of regular expressions at parse
time. Since many of them never get used, we use lazy_re to compile
them on demand the first time they are used, thus speeding up startup
time in some cases.
'''
from __future__ import print_function
import re
class LazyRegexObject(object):
  '''This object creates a RegexObject with the arguments passed in
  its constructor, the first time any attribute except the several on
  the class itself is accessed. This accomplishes lazy compilation of
  the regular expression while maintaining a nearly-identical
  interface.
  '''

  def __init__(self, *args, **kwargs):
    # Arguments are stashed verbatim and handed to re.compile on first use.
    self._stash_args = args
    self._stash_kwargs = kwargs
    self._lazy_re = None

  def _LazyInit(self):
    # Explicit 'is None' rather than truthiness: clearer intent, and robust
    # even if a falsy stand-in were ever assigned.
    if self._lazy_re is None:
      self._lazy_re = re.compile(*self._stash_args, **self._stash_kwargs)

  def __getattribute__(self, name):
    # Only the bookkeeping attributes bypass compilation; any other access
    # (match, search, sub, ...) triggers it and delegates to the compiled
    # pattern object.
    if name in ('_LazyInit', '_lazy_re', '_stash_args', '_stash_kwargs'):
      return object.__getattribute__(self, name)
    else:
      self._LazyInit()
      return getattr(self._lazy_re, name)
def compile(*args, **kwargs):
  '''Returns a LazyRegexObject constructed with these same arguments.

  The returned object compiles a re.RegexObject (via re.compile) on first
  use and thereafter delegates almost all of its methods to it.
  '''
  lazy_object = LazyRegexObject(*args, **kwargs)
  return lazy_object

40
third_party/libwebrtc/tools/grit/grit/lazy_re_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,40 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test for lazy_re.
'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import re
import unittest
from grit import lazy_re
class LazyReUnittest(unittest.TestCase):
  """Tests lazy compilation and argument handling of lazy_re.compile."""

  def testCreatedOnlyOnDemand(self):
    rex = lazy_re.compile('bingo')
    # assertIsNone/assertIsNotNone instead of assertEqual(None, ...):
    # identity check is the correct idiom and gives better failure output.
    self.assertIsNone(rex._lazy_re)
    self.assertTrue(rex.match('bingo'))
    self.assertIsNotNone(rex._lazy_re)

  def testJustKwargsWork(self):
    rex = lazy_re.compile(flags=re.I, pattern='BiNgO')
    self.assertTrue(rex.match('bingo'))

  def testPositionalAndKwargsWork(self):
    rex = lazy_re.compile('BiNgO', flags=re.I)
    self.assertTrue(rex.match('bingo'))
# Allow running this test suite directly from the command line.
if __name__ == '__main__':
  unittest.main()

8
third_party/libwebrtc/tools/grit/grit/node/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Package 'grit.node'
'''
# The redundant module-level 'pass' statement was removed; the docstring
# alone is a complete module body.

670
third_party/libwebrtc/tools/grit/grit/node/base.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,670 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base types for nodes in a GRIT resource tree.
'''
from __future__ import print_function
import ast
import os
import struct
import sys
from xml.sax import saxutils
import six
from grit import constants
from grit import clique
from grit import exception
from grit import util
from grit.node import brotli_util
import grit.format.gzip_string
class Node(object):
  '''An item in the tree that has children.'''

  # Valid content types that can be returned by _ContentType()
  _CONTENT_TYPE_NONE = 0   # No CDATA content but may have children
  _CONTENT_TYPE_CDATA = 1  # Only CDATA, no children.
  _CONTENT_TYPE_MIXED = 2  # CDATA and children, possibly intermingled

  # Types of files to be compressed by default.
  _COMPRESS_BY_DEFAULT_EXTENSIONS = ('.js', '.html', '.css', '.svg')

  # Class default for WhitelistMarkedAsSkip filtering (see ActiveChildren):
  # by default a node is not marked as skipped by a whitelist.
  _whitelist_marked_as_skip = False

  # A class-static cache to speed up EvaluateExpression().
  # Keys are expressions (e.g. 'is_ios and lang == "fr"'). Values are tuples
  # (code, variables_in_expr) where code is the compiled expression and can be
  # directly eval'd, and variables_in_expr is the list of variable and method
  # names used in the expression (e.g. ['is_ios', 'lang']).
  eval_expr_cache = {}
def __init__(self):
  self.children = []  # A list of child elements
  self.mixed_content = []  # A list of u'' and/or child elements (this
                           # duplicates 'children' but
                           # is needed to preserve markup-type content).
  self.name = u''  # The name of this element
  self.attrs = {}  # The set of attributes (keys to values)
  self.parent = None  # Our parent unless we are the root element.
  self.uberclique = None  # Allows overriding uberclique for parts of tree
  self.source = None  # File that this node was parsed from
# This context handler allows you to write "with node:" and get a
# line identifying the offending node if an exception escapes from the body
# of the with statement.
def __enter__(self):
  return self

def __exit__(self, exc_type, exc_value, traceback):
  # Returning None (falsy) means the exception is NOT suppressed; this only
  # prints a line identifying the node that was being processed.
  if exc_type is not None:
    print(u'Error processing node %s: %s' % (six.text_type(self), exc_value))
def __iter__(self):
  '''A preorder iteration through the tree that this node is the root of.'''
  return self.Preorder()

def Preorder(self):
  '''Generator that generates first this node, then the same generator for
  any child nodes.'''
  yield self
  for child in self.children:
    # Recurse into each child's own preorder generator.
    for iterchild in child.Preorder():
      yield iterchild
def ActiveChildren(self):
  '''Returns the children of this node that should be included in the current
  configuration. Overridden by <if>.'''
  return [node for node in self.children if not node.WhitelistMarkedAsSkip()]

def ActiveDescendants(self):
  '''Yields the current node and all descendants that should be included in
  the current configuration, in preorder.'''
  yield self
  for child in self.ActiveChildren():
    for descendant in child.ActiveDescendants():
      yield descendant
def GetRoot(self):
  '''Returns the root Node in the tree this Node belongs to.'''
  # Walk parent links until we reach the node with no parent.
  curr = self
  while curr.parent:
    curr = curr.parent
  return curr

  # TODO(joi) Use this (currently untested) optimization?:
  #if hasattr(self, '_root'):
  #  return self._root
  #curr = self
  #while curr.parent and not hasattr(curr, '_root'):
  #  curr = curr.parent
  #if curr.parent:
  #  self._root = curr._root
  #else:
  #  self._root = curr
  #return self._root
def StartParsing(self, name, parent):
'''Called at the start of parsing.
Args:
name: u'elementname'
parent: grit.node.base.Node or subclass or None
'''
assert isinstance(name, six.string_types)
assert not parent or isinstance(parent, Node)
self.name = name
self.parent = parent
def AddChild(self, child):
'''Adds a child to the list of children of this node, if it is a valid
child for the node.'''
assert isinstance(child, Node)
if (not self._IsValidChild(child) or
self._ContentType() == self._CONTENT_TYPE_CDATA):
explanation = 'invalid child %s for parent %s' % (str(child), self.name)
raise exception.UnexpectedChild(explanation)
self.children.append(child)
self.mixed_content.append(child)
def RemoveChild(self, child_id):
'''Removes the first node that has a "name" attribute which
matches "child_id" in the list of immediate children of
this node.
Args:
child_id: String identifying the child to be removed
'''
index = 0
# Safe not to copy since we only remove the first element found
for child in self.children:
name_attr = child.attrs['name']
if name_attr == child_id:
self.children.pop(index)
self.mixed_content.pop(index)
break
index += 1
def AppendContent(self, content):
'''Appends a chunk of text as content of this node.
Args:
content: u'hello'
Return:
None
'''
assert isinstance(content, six.string_types)
if self._ContentType() != self._CONTENT_TYPE_NONE:
self.mixed_content.append(content)
elif content.strip() != '':
raise exception.UnexpectedContent()
def HandleAttribute(self, attrib, value):
'''Informs the node of an attribute that was parsed out of the GRD file
for it.
Args:
attrib: 'name'
value: 'fooblat'
Return:
None
'''
assert isinstance(attrib, six.string_types)
assert isinstance(value, six.string_types)
if self._IsValidAttribute(attrib, value):
self.attrs[attrib] = value
else:
raise exception.UnexpectedAttribute(attrib)
  def EndParsing(self):
    '''Called at the end of parsing.

    Normalizes mixed content (drops whitespace-only chunks at both ends,
    trims surrounding whitespace, honors the three-apostrophe delimiter
    used to deliberately preserve whitespace), then validates mandatory
    attributes and fills in defaults for unspecified optional attributes.
    '''
    # TODO(joi) Rewrite this, it's extremely ugly!
    if len(self.mixed_content):
      if isinstance(self.mixed_content[0], six.string_types):
        # Remove leading and trailing chunks of pure whitespace.
        while (len(self.mixed_content) and
               isinstance(self.mixed_content[0], six.string_types) and
               self.mixed_content[0].strip() == ''):
          self.mixed_content = self.mixed_content[1:]
        # Strip leading and trailing whitespace from mixed content chunks
        # at front and back.
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[0], six.string_types)):
          self.mixed_content[0] = self.mixed_content[0].lstrip()
        # Remove leading and trailing ''' (used to demarcate whitespace)
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[0], six.string_types)):
          if self.mixed_content[0].startswith("'''"):
            self.mixed_content[0] = self.mixed_content[0][3:]
    if len(self.mixed_content):
      if isinstance(self.mixed_content[-1], six.string_types):
        # Same stuff all over again for the tail end.
        while (len(self.mixed_content) and
               isinstance(self.mixed_content[-1], six.string_types) and
               self.mixed_content[-1].strip() == ''):
          self.mixed_content = self.mixed_content[:-1]
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[-1], six.string_types)):
          self.mixed_content[-1] = self.mixed_content[-1].rstrip()
        if (len(self.mixed_content) and
            isinstance(self.mixed_content[-1], six.string_types)):
          if self.mixed_content[-1].endswith("'''"):
            self.mixed_content[-1] = self.mixed_content[-1][:-3]

    # Check that all mandatory attributes are there.
    for node_mandatt in self.MandatoryAttributes():
      mandatt_list = []
      # A "a|b" entry means exactly one of the alternatives must be present.
      if node_mandatt.find('|') >= 0:
        mandatt_list = node_mandatt.split('|')
      else:
        mandatt_list.append(node_mandatt)
      mandatt_option_found = False
      for mandatt in mandatt_list:
        assert mandatt not in self.DefaultAttributes()
        if mandatt in self.attrs:
          if not mandatt_option_found:
            mandatt_option_found = True
          else:
            # A second member of the mutually-exclusive group was supplied.
            raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
      if not mandatt_option_found:
        raise exception.MissingMandatoryAttribute(mandatt)

    # Add default attributes if not specified in input file.
    for defattr in self.DefaultAttributes():
      if not defattr in self.attrs:
        self.attrs[defattr] = self.DefaultAttributes()[defattr]
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join([c for c in self.mixed_content
if isinstance(c, six.string_types)])
def __str__(self):
'''Returns this node and all nodes below it as an XML document in a Unicode
string.'''
header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
return header + self.FormatXml()
# Some Python 2 glue.
__unicode__ = __str__
  def FormatXml(self, indent = u'', one_line = False):
    '''Returns this node and all nodes below it as an XML
    element in a Unicode string. This differs from __unicode__ in that it does
    not include the <?xml> stuff at the top of the string. If one_line is true,
    children and CDATA are layed out in a way that preserves internal
    whitespace.
    '''
    assert isinstance(indent, six.string_types)
    # Mixed-content nodes always render their contents on one line so that
    # significant internal whitespace survives a round-trip.
    content_one_line = (one_line or
                        self._ContentType() == self._CONTENT_TYPE_MIXED)
    inside_content = self.ContentsAsXml(indent, content_one_line)
    # Then the attributes for this node.
    attribs = u''
    default_attribs = self.DefaultAttributes()
    for attrib, value in sorted(self.attrs.items()):
      # Only print an attribute if it is other than the default value.
      if attrib not in default_attribs or value != default_attribs[attrib]:
        attribs += u' %s=%s' % (attrib, saxutils.quoteattr(value))
    # Finally build the XML for our node and return it
    if len(inside_content) > 0:
      if one_line:
        return u'<%s%s>%s</%s>' % (self.name, attribs, inside_content,
                                   self.name)
      elif content_one_line:
        # Content on its own (indented) line between the tags.
        return u'%s<%s%s>\n%s  %s\n%s</%s>' % (
            indent, self.name, attribs,
            indent, inside_content,
            indent, self.name)
      else:
        return u'%s<%s%s>\n%s\n%s</%s>' % (
            indent, self.name, attribs,
            inside_content,
            indent, self.name)
    else:
      # No content at all: emit a self-closing element.
      return u'%s<%s%s />' % (indent, self.name, attribs)
  def ContentsAsXml(self, indent, one_line):
    '''Returns the contents of this node (CDATA and child elements) in XML
    format. If 'one_line' is true, the content will be laid out on one line.'''
    assert isinstance(indent, six.string_types)
    # Build the contents of the element.  Child elements recurse with two
    # extra spaces of indent; text chunks go through CDATA encoding.
    inside_parts = []
    last_item = None
    for mixed_item in self.mixed_content:
      if isinstance(mixed_item, Node):
        inside_parts.append(mixed_item.FormatXml(indent + u'  ', one_line))
        if not one_line:
          inside_parts.append(u'\n')
      else:
        message = mixed_item
        # If this is the first item and it starts with whitespace, we add
        # the ''' delimiter.
        if not last_item and message.lstrip() != message:
          message = u"'''" + message
        inside_parts.append(util.EncodeCdata(message))
      last_item = mixed_item
    # If there are only child nodes and no cdata, there will be a spurious
    # trailing \n
    if len(inside_parts) and inside_parts[-1] == '\n':
      inside_parts = inside_parts[:-1]
    # If the last item is a string (not a node) and ends with whitespace,
    # we need to add the ''' delimiter.
    if (isinstance(last_item, six.string_types) and
        last_item.rstrip() != last_item):
      inside_parts[-1] = inside_parts[-1] + u"'''"
    return u''.join(inside_parts)
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Called as a final step of RunGatherers.
Args:
substituter: a grit.util.Substituter object.
'''
for child in self.children:
child.SubstituteMessages(substituter)
def _IsValidChild(self, child):
'''Returns true if 'child' is a valid child of this node.
Overridden by subclasses.'''
return False
def _IsValidAttribute(self, name, value):
'''Returns true if 'name' is the name of a valid attribute of this element
and 'value' is a valid value for that attribute. Overriden by
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
above.'''
return self._CONTENT_TYPE_NONE
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
return []
def DefaultAttributes(self):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
Return:
[clique1, clique2] or []
'''
return []
def ToRealPath(self, path_from_basedir):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
Args:
path_from_basedir: '..'
Return:
'resource'
'''
return util.normpath(os.path.join(self.GetRoot().GetBaseDir(),
os.path.expandvars(path_from_basedir)))
def GetInputPath(self):
'''Returns a path, relative to the base directory set for the grd file,
that points to the file the node refers to.
'''
# This implementation works for most nodes that have an input file.
return self.attrs['file']
def UberClique(self):
'''Returns the uberclique that should be used for messages originating in
a given node. If the node itself has its uberclique set, that is what we
use, otherwise we search upwards until we find one. If we do not find one
even at the root node, we set the root node's uberclique to a new
uberclique instance.
'''
node = self
while not node.uberclique and node.parent:
node = node.parent
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
otherwise returns false (even if the node has no contents).
'''
if not 'translateable' in self.attrs:
return True
else:
return self.attrs['translateable'] == 'true'
def IsAccessibilityWithNoUI(self):
'''Returns true if the node is marked as an accessibility label and the
message isn't shown in the UI. Otherwise returns false. This label is
used to determine if the text requires screenshots.'''
if not 'is_accessibility_with_no_ui' in self.attrs:
return False
else:
return self.attrs['is_accessibility_with_no_ui'] == 'true'
def GetNodeById(self, id):
'''Returns the node in the subtree parented by this node that has a 'name'
attribute matching 'id'. Returns None if no such node is found.
'''
for node in self:
if 'name' in node.attrs and node.attrs['name'] == id:
return node
return None
def GetChildrenOfType(self, type):
'''Returns a list of all subnodes (recursing to all leaves) of this node
that are of the indicated type (or tuple of types).
Args:
type: A type you could use with isinstance().
Return:
A list, possibly empty.
'''
return [child for child in self if isinstance(child, type)]
def GetTextualIds(self):
'''Returns a list of the textual ids of this node.
'''
if 'name' in self.attrs:
return [self.attrs['name']]
return []
  @classmethod
  def EvaluateExpression(cls, expr, defs, target_platform, extra_variables={}):
    '''Worker for EvaluateCondition (below) and conditions in XTB files.

    Compiles |expr| once per expression string (compiled code plus the names
    it references are cached in cls.eval_expr_cache) and then evaluates it
    against a minimal variable map.  Names that are not platform shorthands,
    defs entries or extra_variables entries evaluate to False.

    NOTE(review): uses eval(); assumes expressions come only from trusted,
    checked-in .grd/.xtb files.
    '''
    if expr in cls.eval_expr_cache:
      code, variables_in_expr = cls.eval_expr_cache[expr]
    else:
      # Get a list of all variable and method names used in the expression.
      syntax_tree = ast.parse(expr, mode='eval')
      variables_in_expr = [node.id for node in ast.walk(syntax_tree) if
          isinstance(node, ast.Name) and node.id not in ('True', 'False')]
      code = compile(syntax_tree, filename='<string>', mode='eval')
      cls.eval_expr_cache[expr] = code, variables_in_expr
    # Set values only for variables that are needed to eval the expression.
    variable_map = {}
    for name in variables_in_expr:
      if name == 'os':
        value = target_platform
      elif name == 'defs':
        value = defs
      elif name == 'is_linux':
        value = target_platform.startswith('linux')
      elif name == 'is_macosx':
        value = target_platform == 'darwin'
      elif name == 'is_win':
        value = target_platform in ('cygwin', 'win32')
      elif name == 'is_android':
        value = target_platform == 'android'
      elif name == 'is_ios':
        value = target_platform == 'ios'
      elif name == 'is_bsd':
        value = 'bsd' in target_platform
      elif name == 'is_posix':
        value = (target_platform in ('darwin', 'linux2', 'linux3', 'sunos5',
                                     'android', 'ios')
                 or 'bsd' in target_platform)
      elif name == 'pp_ifdef':
        # pp_ifdef(symbol): true iff symbol is defined at all.
        def pp_ifdef(symbol):
          return symbol in defs
        value = pp_ifdef
      elif name == 'pp_if':
        # pp_if(symbol): the symbol's value, or False if undefined.
        def pp_if(symbol):
          return defs.get(symbol, False)
        value = pp_if
      elif name in defs:
        value = defs[name]
      elif name in extra_variables:
        value = extra_variables[name]
      else:
        # Undefined variables default to False.
        value = False
      variable_map[name] = value
    eval_result = eval(code, {}, variable_map)
    assert isinstance(eval_result, bool)
    return eval_result
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
The expression is given a few local variables:
- 'lang' is the language currently being output
(the 'lang' attribute of the <output> element).
- 'context' is the current output context
(the 'context' attribute of the <output> element).
- 'defs' is a map of C preprocessor-style symbol names to their values.
- 'os' is the current platform (likely 'linux2', 'win32' or 'darwin').
- 'pp_ifdef(symbol)' is a shorthand for "symbol in defs".
- 'pp_if(symbol)' is a shorthand for "symbol in defs and defs[symbol]".
- 'is_linux', 'is_macosx', 'is_win', 'is_posix' are true if 'os'
matches the given platform.
'''
root = self.GetRoot()
lang = getattr(root, 'output_language', '')
context = getattr(root, 'output_context', '')
defs = getattr(root, 'defines', {})
target_platform = getattr(root, 'target_platform', '')
extra_variables = {
'lang': lang,
'context': context,
}
return Node.EvaluateExpression(
expr, defs, target_platform, extra_variables)
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
Attrs:
languages: ['fr', 'zh_cn']
'''
for node in self:
if (hasattr(node, 'IsTranslation') and
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
def FindBooleanAttribute(self, attr, default, skip_self):
'''Searches all ancestors of the current node for the nearest enclosing
definition of the given boolean attribute.
Args:
attr: 'fallback_to_english'
default: What to return if no node defines the attribute.
skip_self: Don't check the current node, only its parents.
'''
p = self.parent if skip_self else self
while p:
value = p.attrs.get(attr, 'default').lower()
if value != 'default':
return (value == 'true')
p = p.parent
return default
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
the allow_pseudo attribute set to false.
'''
return self.FindBooleanAttribute('allow_pseudo',
default=True, skip_self=True)
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
given message.
'''
return self.FindBooleanAttribute('fallback_to_english',
default=False, skip_self=True)
def WhitelistMarkedAsSkip(self):
'''Returns true if the node is marked to be skipped in the output by a
whitelist.
'''
return self._whitelist_marked_as_skip
def SetWhitelistMarkedAsSkip(self, mark_skipped):
'''Sets WhitelistMarkedAsSkip.
'''
self._whitelist_marked_as_skip = mark_skipped
def ExpandVariables(self):
'''Whether we need to expand variables on a given node.'''
return False
def IsResourceMapSource(self):
'''Whether this node is a resource map source.'''
return False
  def CompressDataIfNeeded(self, data):
    '''Compress data using the format specified in the compress attribute.

    Args:
      data: The data to compressed.
    Returns:
      The data in gzipped or brotli compressed format. If the format is
      unspecified then this returns the data uncompressed.
    '''
    compress = self.attrs.get('compress')
    # Compress JS, HTML, CSS and SVG files by default (gzip), unless |compress|
    # is explicitly specified.
    compress_by_default = (compress == 'default'
                           and self.attrs.get('file').endswith(
                               self._COMPRESS_BY_DEFAULT_EXTENSIONS))
    if compress == 'gzip' or compress_by_default:
      # We only use rsyncable compression on Linux.
      # We exclude ChromeOS since ChromeOS bots are Linux based but do not have
      # the --rsyncable option built in for gzip. See crbug.com/617950.
      # NOTE(review): on Python 3 sys.platform is 'linux', not 'linux2', so
      # this branch only triggers under Python 2 -- confirm intended.
      if sys.platform == 'linux2' and 'chromeos' not in self.GetRoot().defines:
        return grit.format.gzip_string.GzipStringRsyncable(data)
      return grit.format.gzip_string.GzipString(data)
    if compress == 'brotli':
      # The length of the uncompressed data as 8 bytes little-endian.
      size_bytes = struct.pack("<q", len(data))
      data = brotli_util.BrotliCompress(data)
      # BROTLI_CONST is prepended to brotli decompressed data in order to
      # easily check if a resource has been brotli compressed.
      # The length of the uncompressed data is also appended to the start,
      # truncated to 6 bytes, little-endian. size_bytes is 8 bytes,
      # need to truncate further to 6.
      # The format works out to '6s 2x 0s': keep the low 6 bytes of the
      # little-endian size and skip the 2 high-order bytes.
      formatter = b'%ds %dx %ds' % (6, 2, len(size_bytes) - 8)
      return (constants.BROTLI_CONST +
              b''.join(struct.unpack(formatter, size_bytes)) +
              data)
    if compress == 'false' or compress == 'default':
      return data
    raise Exception('Invalid value for compression')
class ContentNode(Node):
  '''Convenience baseclass for nodes that can have content.'''
  def _ContentType(self):
    # Mixed content: both CDATA text and child elements are allowed.
    return self._CONTENT_TYPE_MIXED

259
third_party/libwebrtc/tools/grit/grit/node/base_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,259 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for base.Node functionality (as used in various subclasses)'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit import grd_reader
from grit import util
from grit.node import base
from grit.node import message
def MakePlaceholder(phname='BINGO'):
  '''Builds a fully-parsed <ph> node named |phname| with content 'bongo'.'''
  placeholder = message.PhNode()
  placeholder.StartParsing(u'ph', None)
  placeholder.HandleAttribute(u'name', phname)
  placeholder.AppendContent(u'bongo')
  placeholder.EndParsing()
  return placeholder
class NodeUnittest(unittest.TestCase):
  '''Tests for grit.node.base.Node behavior, exercised via the
  MessageNode/PhNode subclasses (the only mixed-content nodes).

  Uses assertTrue/assertEqual instead of the failUnless*/failUnlessEqual
  aliases, which were deprecated in Python 2.7/3.2 and removed in 3.12.
  '''

  def testWhitespaceHandling(self):
    # We test using the Message node type.
    node = message.MessageNode()
    node.StartParsing(u'hello', None)
    node.HandleAttribute(u'name', u'bla')
    node.AppendContent(u" '''  two spaces  ")
    node.EndParsing()
    self.assertEqual(node.GetCdata(), u'  two spaces')

    node = message.MessageNode()
    node.StartParsing(u'message', None)
    node.HandleAttribute(u'name', u'bla')
    node.AppendContent(u"  two spaces  '''  ")
    node.EndParsing()
    self.assertEqual(node.GetCdata(), u'two spaces  ')

  def testWhitespaceHandlingWithChildren(self):
    # We test using the Message node type.
    node = message.MessageNode()
    node.StartParsing(u'message', None)
    node.HandleAttribute(u'name', u'bla')
    node.AppendContent(u" '''  two spaces  ")
    node.AddChild(MakePlaceholder())
    node.AppendContent(u' space before and after ')
    node.AddChild(MakePlaceholder('BONGO'))
    node.AppendContent(u" space before two after  '''")
    node.EndParsing()
    self.assertEqual(node.mixed_content[0], u'  two spaces  ')
    self.assertEqual(node.mixed_content[2], u' space before and after ')
    self.assertEqual(node.mixed_content[-1], u' space before two after  ')

  def testXmlFormatMixedContent(self):
    # Again test using the Message node type, because it is the only mixed
    # content node.
    node = message.MessageNode()
    node.StartParsing(u'message', None)
    node.HandleAttribute(u'name', u'name')
    node.AppendContent(u'Hello <young> ')

    ph = message.PhNode()
    ph.StartParsing(u'ph', None)
    ph.HandleAttribute(u'name', u'USERNAME')
    ph.AppendContent(u'$1')
    ex = message.ExNode()
    ex.StartParsing(u'ex', None)
    ex.AppendContent(u'Joi')
    ex.EndParsing()
    ph.AddChild(ex)
    ph.EndParsing()

    node.AddChild(ph)
    node.EndParsing()

    non_indented_xml = node.FormatXml()
    self.assertEqual(non_indented_xml,
                     u'<message name="name">\n  Hello '
                     u'&lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
                     u'\n</message>')

    indented_xml = node.FormatXml(u'  ')
    self.assertEqual(indented_xml,
                     u'  <message name="name">\n    Hello '
                     u'&lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
                     u'\n  </message>')

  def testXmlFormatMixedContentWithLeadingWhitespace(self):
    # Again test using the Message node type, because it is the only mixed
    # content node.
    node = message.MessageNode()
    node.StartParsing(u'message', None)
    node.HandleAttribute(u'name', u'name')
    node.AppendContent(u"'''  Hello <young> ")

    ph = message.PhNode()
    ph.StartParsing(u'ph', None)
    ph.HandleAttribute(u'name', u'USERNAME')
    ph.AppendContent(u'$1')
    ex = message.ExNode()
    ex.StartParsing(u'ex', None)
    ex.AppendContent(u'Joi')
    ex.EndParsing()
    ph.AddChild(ex)
    ph.EndParsing()

    node.AddChild(ph)
    node.AppendContent(u" yessiree '''")
    node.EndParsing()

    non_indented_xml = node.FormatXml()
    self.assertEqual(non_indented_xml,
                     u"<message name=\"name\">\n  '''  Hello"
                     u' &lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
                     u" yessiree '''\n</message>")

    indented_xml = node.FormatXml(u'  ')
    self.assertEqual(indented_xml,
                     u"  <message name=\"name\">\n    '''  Hello"
                     u' &lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
                     u" yessiree '''\n  </message>")

    self.assertTrue(node.GetNodeById('name'))

  def testXmlFormatContentWithEntities(self):
    '''Tests a bug where &nbsp; would not be escaped correctly.'''
    from grit import tclib
    msg_node = message.MessageNode.Construct(None, tclib.Message(
        text = 'BEGIN_BOLDHelloWHITESPACEthere!END_BOLD Bingo!',
        placeholders = [
            tclib.Placeholder('BEGIN_BOLD', '<b>', 'bla'),
            tclib.Placeholder('WHITESPACE', '&nbsp;', 'bla'),
            tclib.Placeholder('END_BOLD', '</b>', 'bla')]),
        'BINGOBONGO')
    xml = msg_node.FormatXml()
    self.assertEqual(xml.find('&nbsp;'), -1, 'should have no entities')

  def testIter(self):
    # First build a little tree of message and ph nodes.
    node = message.MessageNode()
    node.StartParsing(u'message', None)
    node.HandleAttribute(u'name', u'bla')
    node.AppendContent(u" '''  two spaces  ")
    node.AppendContent(u' space before and after ')
    ph = message.PhNode()
    ph.StartParsing(u'ph', None)
    ph.AddChild(message.ExNode())
    ph.HandleAttribute(u'name', u'BINGO')
    ph.AppendContent(u'bongo')
    node.AddChild(ph)
    node.AddChild(message.PhNode())
    node.AppendContent(u" space before two after  '''")

    # Verify preorder traversal.
    order = [message.MessageNode, message.PhNode, message.ExNode,
             message.PhNode]
    for n in node:
      self.assertEqual(type(n), order[0])
      order = order[1:]
    self.assertEqual(len(order), 0)

  def testGetChildrenOfType(self):
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US"
            current_release="3" base_dir=".">
        <outputs>
          <output filename="resource.h" type="rc_header" />
          <output filename="en/generated_resources.rc" type="rc_all"
                  lang="en" />
          <if expr="pp_if('NOT_TRUE')">
            <output filename="de/generated_resources.rc" type="rc_all"
                    lang="de" />
          </if>
        </outputs>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/test/data'))
    from grit.node import node_io
    # GetChildrenOfType recurses, so the <output> inside <if> is found too.
    output_nodes = grd.GetChildrenOfType(node_io.OutputNode)
    self.assertEqual(len(output_nodes), 3)
    self.assertEqual(output_nodes[2].attrs['filename'],
                     'de/generated_resources.rc')

  def testEvaluateExpression(self):
    def AssertExpr(expected_value, expr, defs, target_platform,
                   extra_variables):
      self.assertEqual(expected_value, base.Node.EvaluateExpression(
          expr, defs, target_platform, extra_variables))

    AssertExpr(True, "True", {}, 'linux', {})
    AssertExpr(False, "False", {}, 'linux', {})
    AssertExpr(True, "True or False", {}, 'linux', {})
    AssertExpr(False, "True and False", {}, 'linux', {})
    AssertExpr(True, "os == 'linux'", {}, 'linux', {})
    AssertExpr(False, "os == 'linux'", {}, 'ios', {})
    AssertExpr(True, "'foo' in defs", {'foo': 'bar'}, 'ios', {})
    AssertExpr(False, "'foo' in defs", {'baz': 'bar'}, 'ios', {})
    AssertExpr(False, "'foo' in defs", {}, 'ios', {})
    AssertExpr(True, "is_linux", {}, 'linux2', {})
    AssertExpr(False, "is_linux", {}, 'win32', {})
    AssertExpr(True, "is_macosx", {}, 'darwin', {})
    AssertExpr(False, "is_macosx", {}, 'ios', {})
    AssertExpr(True, "is_win", {}, 'win32', {})
    AssertExpr(False, "is_win", {}, 'darwin', {})
    AssertExpr(True, "is_android", {}, 'android', {})
    AssertExpr(False, "is_android", {}, 'linux3', {})
    AssertExpr(True, "is_ios", {}, 'ios', {})
    AssertExpr(False, "is_ios", {}, 'darwin', {})
    AssertExpr(True, "is_posix", {}, 'linux2', {})
    AssertExpr(True, "is_posix", {}, 'darwin', {})
    AssertExpr(True, "is_posix", {}, 'android', {})
    AssertExpr(True, "is_posix", {}, 'ios', {})
    AssertExpr(True, "is_posix", {}, 'freebsd7', {})
    AssertExpr(False, "is_posix", {}, 'win32', {})
    # pp_ifdef is membership only -- a False value still counts as defined.
    AssertExpr(True, "pp_ifdef('foo')", {'foo': True}, 'win32', {})
    AssertExpr(True, "pp_ifdef('foo')", {'foo': False}, 'win32', {})
    AssertExpr(False, "pp_ifdef('foo')", {'bar': True}, 'win32', {})
    AssertExpr(True, "pp_if('foo')", {'foo': True}, 'win32', {})
    AssertExpr(False, "pp_if('foo')", {'foo': False}, 'win32', {})
    AssertExpr(False, "pp_if('foo')", {'bar': True}, 'win32', {})
    AssertExpr(True, "foo", {'foo': True}, 'win32', {})
    AssertExpr(False, "foo", {'foo': False}, 'win32', {})
    AssertExpr(False, "foo", {'bar': True}, 'win32', {})
    AssertExpr(True, "foo == 'baz'", {'foo': 'baz'}, 'win32', {})
    AssertExpr(False, "foo == 'baz'", {'foo': True}, 'win32', {})
    AssertExpr(False, "foo == 'baz'", {}, 'win32', {})
    AssertExpr(True, "lang == 'de'", {}, 'win32', {'lang': 'de'})
    AssertExpr(False, "lang == 'de'", {}, 'win32', {'lang': 'fr'})
    AssertExpr(False, "lang == 'de'", {}, 'win32', {})

    # Test a couple more complex expressions for good measure.
    AssertExpr(True, "is_ios and (lang in ['de', 'fr'] or foo)",
               {'foo': 'bar'}, 'ios', {'lang': 'fr', 'context': 'today'})
    AssertExpr(False, "is_ios and (lang in ['de', 'fr'] or foo)",
               {'foo': False}, 'linux2', {'lang': 'fr', 'context': 'today'})
    AssertExpr(False, "is_ios and (lang in ['de', 'fr'] or foo)",
               {'baz': 'bar'}, 'ios', {'lang': 'he', 'context': 'today'})
    AssertExpr(True, "foo == 'bar' or not baz",
               {'foo': 'bar', 'fun': True}, 'ios', {'lang': 'en'})
    AssertExpr(True, "foo == 'bar' or not baz",
               {}, 'ios', {'lang': 'en', 'context': 'java'})
    AssertExpr(False, "foo == 'bar' or not baz",
               {'foo': 'ruz', 'baz': True}, 'ios', {'lang': 'en'})
if __name__ == '__main__':
unittest.main()

29
third_party/libwebrtc/tools/grit/grit/node/brotli_util.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,29 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Framework for compressing resources using Brotli."""
import subprocess
__brotli_executable = None
def SetBrotliCommand(brotli):
  '''Records the command line used to invoke Brotli.

  Args:
    brotli: argv list -- in production the path to the Brotli executable;
        during testing e.g. [python, mock_brotli.py] for Windows coverage.
  '''
  global __brotli_executable
  __brotli_executable = brotli
def BrotliCompress(data):
  '''Compresses |data| (bytes) by piping it through the configured Brotli
  executable and returns the compressed bytes.

  Raises:
    Exception: if SetBrotliCommand() has not been called first.
  '''
  if not __brotli_executable:
    # Fixed typo in the error message: "to you GN" -> "to your GN".
    raise Exception('Add "use_brotli = true" to your GN grit(...) target ' +
                    'if you want to use brotli.')
  compress = subprocess.Popen(__brotli_executable + ['-', '-f'],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  return compress.communicate(data)[0]
def IsInitialized():
  '''Returns True once SetBrotliCommand() has supplied a command line.'''
  # Reading a module global needs no 'global' declaration.
  return __brotli_executable is not None

8
third_party/libwebrtc/tools/grit/grit/node/custom/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Package 'grit.node.custom'
'''
pass

29
third_party/libwebrtc/tools/grit/grit/node/custom/filename.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,29 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A CustomType for filenames.'''
from __future__ import print_function
from grit import clique
from grit import lazy_re
class WindowsFilename(clique.CustomType):
  '''Validates that messages can be used as Windows filenames, and strips
  illegal characters out of translations.
  '''
  # Characters not permitted in Windows filenames.
  # NOTE(review): r'\\\\' matches TWO consecutive backslashes, so a single
  # backslash slips through -- looks like it was meant to be r'\\'; confirm
  # against upstream grit before changing.
  BANNED = lazy_re.compile(r'\+|:|\/|\\\\|\*|\?|\"|\<|\>|\|')

  def Validate(self, message):
    # True iff the source message contains no banned characters.
    return not self.BANNED.search(message.GetPresentableContent())

  def ValidateAndModify(self, lang, translation):
    # Scrub the translation in place and report whether it was clean.
    is_ok = self.Validate(translation)
    self.ModifyEachTextPart(lang, translation)
    return is_ok

  def ModifyTextPart(self, lang, text):
    # Replace each banned character with a space.
    return self.BANNED.sub(' ', text)

Просмотреть файл

@ -0,0 +1,34 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.node.custom.filename'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
import unittest
from grit.node.custom import filename
from grit import clique
from grit import tclib
class WindowsFilenameUnittest(unittest.TestCase):
  '''Checks that WindowsFilename scrubs banned characters from translations.

  Uses assertEqual instead of the failUnless alias, which was deprecated in
  Python 2.7/3.2 and removed in 3.12.
  '''

  def testValidate(self):
    factory = clique.UberClique()
    msg = tclib.Message(text='Bingo bongo')
    c = factory.MakeClique(msg)
    c.SetCustomType(filename.WindowsFilename())
    translation = tclib.Translation(id=msg.GetId(), text='Bilingo bolongo:')
    c.AddTranslation(translation, 'fr')
    # The trailing ':' must have been replaced with a space.
    self.assertEqual(c.MessageForLanguage('fr').GetRealContent(),
                     'Bilingo bolongo ')
if __name__ == '__main__':
unittest.main()

64
third_party/libwebrtc/tools/grit/grit/node/empty.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,64 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Container nodes that don't have any logic.
'''
from __future__ import print_function
from grit.node import base
from grit.node import include
from grit.node import message
from grit.node import misc
from grit.node import node_io
from grit.node import structure
class GroupingNode(base.Node):
  '''Base class for all the grouping elements (<structures>, <includes>,
  <messages> and <identifiers>).'''
  def DefaultAttributes(self):
    '''Optional attributes shared by every grouping element.'''
    return {
        'first_id': '',
        'comment': '',
        'fallback_to_english': 'false',
        'fallback_to_low_resolution': 'false',
    }
class IncludesNode(GroupingNode):
  '''The <includes> element.'''
  def _IsValidChild(self, child):
    allowed = (include.IncludeNode, misc.IfNode, misc.PartNode)
    return isinstance(child, allowed)
class MessagesNode(GroupingNode):
  '''The <messages> element.'''
  def _IsValidChild(self, child):
    allowed = (message.MessageNode, misc.IfNode, misc.PartNode)
    return isinstance(child, allowed)
class StructuresNode(GroupingNode):
  '''The <structures> element.'''
  def _IsValidChild(self, child):
    allowed = (structure.StructureNode, misc.IfNode, misc.PartNode)
    return isinstance(child, allowed)
class TranslationsNode(base.Node):
  '''The <translations> element.'''
  def _IsValidChild(self, child):
    allowed = (node_io.FileNode, misc.IfNode, misc.PartNode)
    return isinstance(child, allowed)
class OutputsNode(base.Node):
  '''The <outputs> element.'''
  def _IsValidChild(self, child):
    allowed = (node_io.OutputNode, misc.IfNode, misc.PartNode)
    return isinstance(child, allowed)
class IdentifiersNode(GroupingNode):
  '''The <identifiers> element.'''
  def _IsValidChild(self, child):
    # Only <identifier> children are allowed (no <if>/<part> here).
    return isinstance(child, misc.IdentifierNode)

170
third_party/libwebrtc/tools/grit/grit/node/include.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,170 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handling of the <include> element.
"""
from __future__ import print_function
import os
from grit import util
import grit.format.html_inline
import grit.format.rc
from grit.format import minifier
from grit.node import base
class IncludeNode(base.Node):
  """An <include> element."""

  def __init__(self):
    super(IncludeNode, self).__init__()

    # Cache flattened data so that we don't flatten the same file
    # multiple times.
    self._flattened_data = None
    # Also keep track of the last filename we flattened to, so we can
    # avoid doing it more than once.
    self._last_flat_filename = None

  def _IsValidChild(self, child):
    # <include> elements never have children.
    return False

  def _GetFlattenedData(
      self, allow_external_script=False, preprocess_only=False):
    """Returns the inlined file contents as UTF-8 encoded bytes.

    Args:
      allow_external_script: Passed to the HTML inliner; permits external
          <script src> references.
      preprocess_only: Restricts inlining to <if> and <include> handling.

    NOTE: the flattened text is cached on first use, so the keyword
    arguments only take effect on the first call for this node.
    """
    if not self._flattened_data:
      filename = self.ToRealPath(self.GetInputPath())
      self._flattened_data = (
          grit.format.html_inline.InlineToString(filename, self,
              preprocess_only=preprocess_only,
              allow_external_script=allow_external_script))
    return self._flattened_data.encode('utf-8')

  def MandatoryAttributes(self):
    return ['name', 'type', 'file']

  def DefaultAttributes(self):
    """Attributes:
      translateable: False if the node has contents that should not be
                     translated.
      preprocess: Takes the same code path as flattenhtml, but it
                  disables any processing/inlining outside of <if>
                  and <include>.
      compress: The format to compress the data with, e.g. 'gzip'
                or 'false' if data should not be compressed.
      skip_minify: If true, skips minifying the node's contents.
      skip_in_resource_map: If true, do not add to the resource map.
    """
    return {
        'translateable': 'true',
        'generateid': 'true',
        'filenameonly': 'false',
        'mkoutput': 'false',
        'preprocess': 'false',
        'flattenhtml': 'false',
        'compress': 'default',
        'allowexternalscript': 'false',
        'relativepath': 'false',
        'use_base_dir': 'true',
        'skip_minify': 'false',
        'skip_in_resource_map': 'false',
    }

  def GetInputPath(self):
    """Returns the input file path, made relative to base_dir if needed."""
    # Do not mess with absolute paths, that would make them invalid.
    if os.path.isabs(os.path.expandvars(self.attrs['file'])):
      return self.attrs['file']

    # We have no control over code that calls ToRealPath later, so convert
    # the path to be relative against our basedir.
    if self.attrs.get('use_base_dir', 'true') != 'true':
      # Normalize the directory path to use the appropriate OS separator.
      # GetBaseDir() may return paths\like\this or paths/like/this, since it is
      # read from the base_dir attribute in the grd file.
      norm_base_dir = util.normpath(self.GetRoot().GetBaseDir())
      return os.path.relpath(self.attrs['file'], norm_base_dir)

    return self.attrs['file']

  def FileForLanguage(self, lang, output_dir):
    """Returns the file for the specified language.  This allows us to return
    different files for different language variants of the include file.
    """
    input_path = self.GetInputPath()
    if input_path is None:
      return None
    return self.ToRealPath(input_path)

  def GetDataPackValue(self, lang, encoding):
    '''Returns bytes or a str representation for a data_pack entry.'''
    filename = self.ToRealPath(self.GetInputPath())
    if self.attrs['flattenhtml'] == 'true':
      allow_external_script = self.attrs['allowexternalscript'] == 'true'
      data = self._GetFlattenedData(allow_external_script=allow_external_script)
    elif self.attrs['preprocess'] == 'true':
      data = self._GetFlattenedData(preprocess_only=True)
    else:
      data = util.ReadFile(filename, util.BINARY)

    if self.attrs['skip_minify'] != 'true':
      # Note that the minifier will only do anything if a minifier command
      # has been set in the command line.
      data = minifier.Minify(data, filename)

    # Include does not care about the encoding, because it only returns binary
    # data.
    return self.CompressDataIfNeeded(data)

  def Process(self, output_dir):
    """Rewrite file references to be base64 encoded data URLs. The new file
    will be written to output_dir and the name of the new file is returned."""
    filename = self.ToRealPath(self.GetInputPath())
    flat_filename = os.path.join(output_dir,
        self.attrs['name'] + '_' + os.path.basename(filename))

    if self._last_flat_filename == flat_filename:
      # BUGFIX: previously this early exit returned None, contradicting the
      # documented contract; return the cached file's name instead.
      return os.path.basename(flat_filename)

    with open(flat_filename, 'wb') as outfile:
      outfile.write(self._GetFlattenedData())

    self._last_flat_filename = flat_filename
    return os.path.basename(flat_filename)

  def GetHtmlResourceFilenames(self):
    """Returns a set of all filenames inlined by this file."""
    allow_external_script = self.attrs['allowexternalscript'] == 'true'
    return grit.format.html_inline.GetResourceFilenames(
        self.ToRealPath(self.GetInputPath()),
        self,
        allow_external_script=allow_external_script)

  def IsResourceMapSource(self):
    # Included in the generated resource map unless explicitly opted out.
    skip = self.attrs.get('skip_in_resource_map', 'false') == 'true'
    return not skip

  @staticmethod
  def Construct(parent, name, type, file, translateable=True,
                filenameonly=False, mkoutput=False, relativepath=False):
    """Creates a new node which is a child of 'parent', with attributes set
    by parameters of the same name.
    """
    # Convert types to appropriate strings
    translateable = util.BoolToString(translateable)
    filenameonly = util.BoolToString(filenameonly)
    mkoutput = util.BoolToString(mkoutput)
    relativepath = util.BoolToString(relativepath)

    node = IncludeNode()
    node.StartParsing('include', parent)
    node.HandleAttribute('name', name)
    node.HandleAttribute('type', type)
    node.HandleAttribute('file', file)
    node.HandleAttribute('translateable', translateable)
    node.HandleAttribute('filenameonly', filenameonly)
    node.HandleAttribute('mkoutput', mkoutput)
    node.HandleAttribute('relativepath', relativepath)
    node.EndParsing()
    return node

134
third_party/libwebrtc/tools/grit/grit/node/include_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,134 @@
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for include.IncludeNode'''
from __future__ import print_function
import os
import sys
import unittest
import zlib
# When run standalone, put the grit package root on sys.path so the
# absolute 'grit.*' imports below resolve.
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from grit.node import misc
from grit.node import include
from grit.node import empty
from grit import util
def checkIsGzipped(filename, compress_attr):
  '''Returns True iff the data-pack value produced for an <include> of
  |filename| (with the given compress attribute snippet) is a gzip stream
  that decompresses back to the file's original bytes.'''
  test_data_root = util.PathFromRoot('grit/testdata')
  root = util.ParseGrdForUnittest(
      '''
      <includes>
        <include name="TEST_TXT" file="%s" %s type="BINDATA"/>
      </includes>''' % (filename, compress_attr),
      base_dir=test_data_root)
  node, = root.GetChildrenOfType(include.IncludeNode)
  packed = node.GetDataPackValue(lang='en', encoding=util.BINARY)
  # wbits = 16 + MAX_WBITS makes zlib expect a gzip header and trailer.
  unpacked = zlib.decompress(packed, 16 + zlib.MAX_WBITS)
  original = util.ReadFile(os.path.join(test_data_root, filename), util.BINARY)
  return original == unpacked
class IncludeNodeUnittest(unittest.TestCase):
  def testGetPath(self):
    # Build a minimal <grit><release><includes><include> tree by hand and
    # verify the include's input path resolves against base_dir.
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', r'..\resource')
    release = misc.ReleaseNode()
    release.StartParsing(u'release', root)
    release.HandleAttribute(u'seq', u'1')
    root.AddChild(release)
    includes = empty.IncludesNode()
    includes.StartParsing(u'includes', release)
    release.AddChild(includes)
    include_node = include.IncludeNode()
    include_node.StartParsing(u'include', includes)
    include_node.HandleAttribute(u'file', r'flugel\kugel.pdf')
    includes.AddChild(include_node)
    root.EndParsing()

    # Windows-style separators in the grd must be normalized on output.
    self.assertEqual(root.ToRealPath(include_node.GetInputPath()),
                     util.normpath(
                         os.path.join(r'../resource', r'flugel/kugel.pdf')))

  def testGetPathNoBasedir(self):
    # Same tree as above, but with use_base_dir="false": the path is made
    # relative to the current working directory instead of base_dir.
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', r'..\resource')
    release = misc.ReleaseNode()
    release.StartParsing(u'release', root)
    release.HandleAttribute(u'seq', u'1')
    root.AddChild(release)
    includes = empty.IncludesNode()
    includes.StartParsing(u'includes', release)
    release.AddChild(includes)
    include_node = include.IncludeNode()
    include_node.StartParsing(u'include', includes)
    include_node.HandleAttribute(u'file', r'flugel\kugel.pdf')
    include_node.HandleAttribute(u'use_base_dir', u'false')
    includes.AddChild(include_node)
    root.EndParsing()

    last_dir = os.path.basename(os.getcwd())
    expected_path = util.normpath(os.path.join(
        u'..', last_dir, u'flugel/kugel.pdf'))
    self.assertEqual(root.ToRealPath(include_node.GetInputPath()),
                     expected_path)

  def testCompressGzip(self):
    # Explicit compress="gzip" must produce gzip output.
    self.assertTrue(checkIsGzipped('test_text.txt', 'compress="gzip"'))

  def testCompressGzipByDefault(self):
    # Web resource types are gzipped when compress is absent or "default".
    self.assertTrue(checkIsGzipped('test_html.html', ''))
    self.assertTrue(checkIsGzipped('test_js.js', ''))
    self.assertTrue(checkIsGzipped('test_css.css', ''))
    self.assertTrue(checkIsGzipped('test_svg.svg', ''))

    self.assertTrue(checkIsGzipped('test_html.html', 'compress="default"'))
    self.assertTrue(checkIsGzipped('test_js.js', 'compress="default"'))
    self.assertTrue(checkIsGzipped('test_css.css', 'compress="default"'))
    self.assertTrue(checkIsGzipped('test_svg.svg', 'compress="default"'))

  def testSkipInResourceMap(self):
    # skip_in_resource_map="true" excludes a node; absent or "false" keeps it.
    root = util.ParseGrdForUnittest('''
        <includes>
          <include name="TEST1_TXT" file="test1_text.txt" type="BINDATA"/>
          <include name="TEST2_TXT" file="test1_text.txt" type="BINDATA"
                                    skip_in_resource_map="true"/>
          <include name="TEST3_TXT" file="test1_text.txt" type="BINDATA"
                                    skip_in_resource_map="false"/>
        </includes>''', base_dir = util.PathFromRoot('grit/testdata'))
    inc = root.GetChildrenOfType(include.IncludeNode)
    self.assertTrue(inc[0].IsResourceMapSource())
    self.assertFalse(inc[1].IsResourceMapSource())
    self.assertTrue(inc[2].IsResourceMapSource())

  def testAcceptsPreprocess(self):
    # preprocess="true" runs <if>-style preprocessing only: kept regions
    # survive, removed regions are stripped.
    root = util.ParseGrdForUnittest(
        '''
        <includes>
          <include name="PREPROCESS_TEST" file="preprocess_test.html"
                   preprocess="true" compress="false" type="chrome_html"/>
        </includes>''',
        base_dir=util.PathFromRoot('grit/testdata'))
    inc, = root.GetChildrenOfType(include.IncludeNode)
    result = inc.GetDataPackValue(lang='en', encoding=util.BINARY)
    self.assertIn(b'should be kept', result)
    self.assertIn(b'in the middle...', result)
    self.assertNotIn(b'should be removed', result)
# Allow running this file directly as a test script.
if __name__ == '__main__':
  unittest.main()

60
third_party/libwebrtc/tools/grit/grit/node/mapping.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,60 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Maps each node type to an implementation class.
When adding a new node type, you add to this mapping.
'''
from __future__ import print_function
from grit import exception
from grit.node import empty
from grit.node import include
from grit.node import message
from grit.node import misc
from grit.node import node_io
from grit.node import structure
from grit.node import variant
# Maps a grd element name to the node class that implements it.  The
# insertion order is preserved from the original table (dicts keep
# insertion order), though lookups are by key only.
_ELEMENT_TO_CLASS = {
    'identifiers': empty.IdentifiersNode,
    'includes': empty.IncludesNode,
    'messages': empty.MessagesNode,
    'outputs': empty.OutputsNode,
    'structures': empty.StructuresNode,
    'translations': empty.TranslationsNode,
    'include': include.IncludeNode,
    'emit': node_io.EmitNode,
    'file': node_io.FileNode,
    'output': node_io.OutputNode,
    'ex': message.ExNode,
    'message': message.MessageNode,
    'ph': message.PhNode,
    'else': misc.ElseNode,
    'grit': misc.GritNode,
    'identifier': misc.IdentifierNode,
    'if': misc.IfNode,
    'part': misc.PartNode,
    'release': misc.ReleaseNode,
    'then': misc.ThenNode,
    'structure': structure.StructureNode,
    'skeleton': variant.SkeletonNode,
}
def ElementToClass(name, typeattr):
  '''Maps an element to a class that handles the element.

  Args:
    name: 'element' (the name of the element)
    typeattr: 'type' (the value of the type attribute, if present, else None)

  Return:
    type
  '''
  handler = _ELEMENT_TO_CLASS.get(name)
  if handler is None:
    raise exception.UnknownElement()
  return handler

362
third_party/libwebrtc/tools/grit/grit/node/message.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,362 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
from __future__ import print_function
import re
import six
from grit.node import base
from grit import clique
from grit import exception
from grit import lazy_re
from grit import tclib
from grit import util
# Matches exactly three dots ending a line or followed by whitespace.
_ELLIPSIS_PATTERN = lazy_re.compile(r'(?<!\.)\.\.\.(?=$|\s)')
_ELLIPSIS_SYMBOL = u'\u2026'  # Ellipsis

# Finds whitespace at the start and end of a string which can be multiline.
_WHITESPACE = lazy_re.compile(r'(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
                              re.DOTALL | re.MULTILINE)

# <ph> placeholder elements should contain the special character formatters
# used to format <ph> element content.
# Android format.
_ANDROID_FORMAT = (r'%[1-9]+\$'
                   r'([-#+ 0,(]*)([0-9]+)?(\.[0-9]+)?'
                   r'([bBhHsScCdoxXeEfgGaAtT%n])')
# Chrome l10n format.
_CHROME_FORMAT = r'\$+\d'
# Windows EWT numeric and GRIT %s %d formats.
_OTHER_FORMAT = r'%[0-9sd]'

# Finds formatters that must be in a placeholder (<ph>) element.
# Used both to reject formatters in bare message text and to validate the
# contents of each <ph> element in MessageNode.EndParsing.
_FORMATTERS = lazy_re.compile(
    '(%s)|(%s)|(%s)' % (_ANDROID_FORMAT, _CHROME_FORMAT, _OTHER_FORMAT))

# Error messages printed before raising in MessageNode.EndParsing.
_BAD_PLACEHOLDER_MSG = ('ERROR: Placeholder formatter found outside of <ph> '
                        'tag in message "%s" in %s.')
_INVALID_PH_CHAR_MSG = ('ERROR: Invalid format characters found in message '
                        '"%s" <ph> tag in %s.')

# Finds HTML tag tokens.
_HTMLTOKEN = lazy_re.compile(r'<[/]?[a-z][a-z0-9]*[^>]*>', re.I)

# Finds HTML entities.
_HTMLENTITY = lazy_re.compile(r'&[^\s]*;')
class MessageNode(base.ContentNode):
  '''A <message> element.'''

  # For splitting a list of things that can be separated by commas or
  # whitespace
  _SPLIT_RE = lazy_re.compile(r'\s*,\s*|\s+')

  def __init__(self):
    super(MessageNode, self).__init__()

    # Valid after EndParsing, this is the MessageClique that contains the
    # source message and any translations of it that have been loaded.
    self.clique = None

    # We don't send leading and trailing whitespace into the translation
    # console, but rather tack it onto the source message and any
    # translations when formatting them into RC files or what have you.
    self.ws_at_start = ''  # Any whitespace characters at the start of the text
    self.ws_at_end = ''  # --"-- at the end of the text

    # A list of "shortcut groups" this message is in.  We check to make sure
    # that shortcut keys (e.g. &J) within each shortcut group are unique.
    self.shortcut_groups_ = []

    # Formatter-specific data used to control the output of individual strings.
    # formatter_data is a space separated list of C preprocessor-style
    # definitions.  Names without values are given the empty string value.
    # Example: "foo=5 bar baz=100"
    self.formatter_data = {}

    # Whether or not to convert ... -> U+2026 within Translate().
    self._replace_ellipsis = False

  def _IsValidChild(self, child):
    # Only <ph> placeholder elements may nest inside a <message>.
    return isinstance(child, (PhNode))

  def _IsValidAttribute(self, name, value):
    # Reject unknown attribute names, and non-boolean values for the two
    # attributes that must be 'true'/'false'.
    if name not in [
        'name', 'offset', 'translateable', 'desc', 'meaning',
        'internal_comment', 'shortcut_groups', 'custom_type', 'validation_expr',
        'use_name_for_id', 'sub_variable', 'formatter_data',
        'is_accessibility_with_no_ui'
    ]:
      return False
    if (name in ('translateable', 'sub_variable') and
        value not in ['true', 'false']):
      return False
    return True

  def SetReplaceEllipsis(self, value):
    r'''Sets whether to replace ... with \u2026.
    '''
    self._replace_ellipsis = value

  def MandatoryAttributes(self):
    # A message is identified either by a 'name' or by an 'offset' from the
    # enclosing grouping node's first_id.
    return ['name|offset']

  def DefaultAttributes(self):
    return {
        'custom_type': '',
        'desc': '',
        'formatter_data': '',
        'internal_comment': '',
        'is_accessibility_with_no_ui': 'false',
        'meaning': '',
        'shortcut_groups': '',
        'sub_variable': 'false',
        'translateable': 'true',
        'use_name_for_id': 'false',
        'validation_expr': '',
    }

  def HandleAttribute(self, attrib, value):
    '''Stores the attribute; additionally parses 'formatter_data' into the
    self.formatter_data dict.'''
    base.ContentNode.HandleAttribute(self, attrib, value)
    if attrib != 'formatter_data':
      return

    # Parse value, a space-separated list of defines, into a dict.
    # Example: "foo=5 bar" -> {'foo':'5', 'bar':''}
    for item in value.split():
      name, _, val = item.partition('=')
      self.formatter_data[name] = val

  def GetTextualIds(self):
    '''
    Returns the concatenation of the parent's node first_id and
    this node's offset if it has one, otherwise just call the
    superclass' implementation
    '''
    if 'offset' not in self.attrs:
      return super(MessageNode, self).GetTextualIds()

    # we search for the first grouping node in the parents' list
    # to take care of the case where the first parent is an <if> node
    grouping_parent = self.parent
    import grit.node.empty
    while grouping_parent and not isinstance(grouping_parent,
                                             grit.node.empty.GroupingNode):
      grouping_parent = grouping_parent.parent

    assert 'first_id' in grouping_parent.attrs
    return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]

  def IsTranslateable(self):
    return self.attrs['translateable'] == 'true'

  def EndParsing(self):
    '''Builds the tclib.Message from the parsed content: collects text and
    <ph> placeholders, validates placeholder formatters, splits off leading
    and trailing whitespace, and installs the message into a clique.'''
    super(MessageNode, self).EndParsing()

    # Make the text (including placeholder references) and list of placeholders,
    # verify placeholder formats, then strip and store leading and trailing
    # whitespace and create the tclib.Message() and a clique to contain it.

    text = ''
    placeholders = []

    for item in self.mixed_content:
      if isinstance(item, six.string_types):
        # Not a <ph> element: fail if any <ph> formatters are detected.
        if _FORMATTERS.search(item):
          print(_BAD_PLACEHOLDER_MSG % (item, self.source))
          raise exception.PlaceholderNotInsidePhNode
        text += item
      else:
        # Extract the <ph> element components.
        presentation = item.attrs['name'].upper()
        text += presentation
        ex = ' '  # <ex> example element cdata if present.
        if len(item.children):
          ex = item.children[0].GetCdata()
        original = item.GetCdata()

        # Sanity check the <ph> element content.
        cdata = original

        # Replace all HTML tag tokens in cdata.
        match = _HTMLTOKEN.search(cdata)
        while match:
          cdata = cdata.replace(match.group(0), '_')
          match = _HTMLTOKEN.search(cdata)

        # Replace all HTML entities in cdata.
        match = _HTMLENTITY.search(cdata)
        while match:
          cdata = cdata.replace(match.group(0), '_')
          match = _HTMLENTITY.search(cdata)

        # Remove first matching formatter from cdata.
        match = _FORMATTERS.search(cdata)
        if match:
          cdata = cdata.replace(match.group(0), '')

        # Fail if <ph> special chars remain in cdata.
        if re.search(r'[%\$]', cdata):
          message_id = self.attrs['name'] + ' ' + original;
          print(_INVALID_PH_CHAR_MSG % (message_id, self.source))
          raise exception.InvalidCharactersInsidePhNode

        # Otherwise, accept this <ph> placeholder.
        placeholders.append(tclib.Placeholder(presentation, original, ex))

    # Split leading/trailing whitespace off the translateable body; it is
    # re-attached when the message is formatted for output.
    m = _WHITESPACE.match(text)
    if m:
      self.ws_at_start = m.group('start')
      self.ws_at_end = m.group('end')
      text = m.group('body')

    self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
    self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']

    # When no description is provided, fall back to "ID: <name>" so the
    # translation console still shows something identifying.
    description_or_id = self.attrs['desc']
    if description_or_id == '' and 'name' in self.attrs:
      description_or_id = 'ID: %s' % self.attrs['name']

    assigned_id = None
    if self.attrs['use_name_for_id'] == 'true':
      assigned_id = self.attrs['name']
    message = tclib.Message(text=text, placeholders=placeholders,
                            description=description_or_id,
                            meaning=self.attrs['meaning'],
                            assigned_id=assigned_id)
    self.InstallMessage(message)

  def InstallMessage(self, message):
    '''Sets this node's clique from a tclib.Message instance.

    Args:
      message: A tclib.Message.
    '''
    self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
    for group in self.shortcut_groups_:
      self.clique.AddToShortcutGroup(group)
    if self.attrs['custom_type'] != '':
      self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
                                                      clique.CustomType))
    elif self.attrs['validation_expr'] != '':
      self.clique.SetCustomType(
          clique.OneOffCustomType(self.attrs['validation_expr']))

  def SubstituteMessages(self, substituter):
    '''Applies substitution to this message.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    message = substituter.SubstituteMessage(self.clique.GetMessage())
    if message is not self.clique.GetMessage():
      self.InstallMessage(message)

  def GetCliques(self):
    # Empty list until EndParsing has created the clique.
    return [self.clique] if self.clique else []

  def Translate(self, lang):
    '''Returns a translated version of this message.
    '''
    assert self.clique
    msg = self.clique.MessageForLanguage(lang,
                                         self.PseudoIsAllowed(),
                                         self.ShouldFallbackToEnglish()
                                         ).GetRealContent()
    if self._replace_ellipsis:
      msg = _ELLIPSIS_PATTERN.sub(_ELLIPSIS_SYMBOL, msg)
    # Always remove all byte order marks (\uFEFF) https://crbug.com/1033305
    msg = msg.replace(u'\uFEFF','')
    return msg.replace('[GRITLANGCODE]', lang)

  def NameOrOffset(self):
    '''Returns the 'name' attribute if present, else the 'offset'.'''
    key = 'name' if 'name' in self.attrs else 'offset'
    return self.attrs[key]

  def ExpandVariables(self):
    '''We always expand variables on Messages.'''
    return True

  def GetDataPackValue(self, lang, encoding):
    '''Returns a str represenation for a data_pack entry.'''
    # Re-attach the whitespace that was stripped off in EndParsing.
    message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
    return util.Encode(message, encoding)

  def IsResourceMapSource(self):
    return True

  @staticmethod
  def Construct(parent, message, name, desc='', meaning='', translateable=True):
    '''Constructs a new message node that is a child of 'parent', with the
    name, desc, meaning and translateable attributes set using the same-named
    parameters and the text of the message and any placeholders taken from
    'message', which must be a tclib.Message() object.'''
    # Convert type to appropriate string
    translateable = 'true' if translateable else 'false'

    node = MessageNode()
    node.StartParsing('message', parent)
    node.HandleAttribute('name', name)
    node.HandleAttribute('desc', desc)
    node.HandleAttribute('meaning', meaning)
    node.HandleAttribute('translateable', translateable)

    items = message.GetContent()
    for ix, item in enumerate(items):
      if isinstance(item, six.string_types):
        # Ensure whitespace at front and back of message is correctly handled.
        if ix == 0:
          item = "'''" + item
        if ix == len(items) - 1:
          item = item + "'''"

        node.AppendContent(item)
      else:
        phnode = PhNode()
        phnode.StartParsing('ph', node)
        phnode.HandleAttribute('name', item.GetPresentation())
        phnode.AppendContent(item.GetOriginal())

        if len(item.GetExample()) and item.GetExample() != ' ':
          exnode = ExNode()
          exnode.StartParsing('ex', phnode)
          exnode.AppendContent(item.GetExample())
          exnode.EndParsing()
          phnode.AddChild(exnode)

        phnode.EndParsing()
        node.AddChild(phnode)

    node.EndParsing()
    return node
class PhNode(base.ContentNode):
  '''A <ph> element (a placeholder inside a <message>).'''

  def _IsValidChild(self, child):
    '''Only <ex> example elements may appear inside a placeholder.'''
    return isinstance(child, ExNode)

  def MandatoryAttributes(self):
    '''Every placeholder must carry a 'name' attribute.'''
    return ['name']

  def EndParsing(self):
    super(PhNode, self).EndParsing()
    # A placeholder may carry at most one <ex> example child.
    example_count = len(self.children)
    if example_count > 1:
      raise exception.TooManyExamples()

  def GetTextualIds(self):
    '''The 'name' attribute of a placeholder is not a textual resource ID.'''
    return []
class ExNode(base.ContentNode):
  '''An <ex> element: an example value for the enclosing <ph> placeholder.'''
  pass

380
third_party/libwebrtc/tools/grit/grit/node/message_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,380 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.node.message'''
from __future__ import print_function
import os
import sys
import unittest
# When run standalone, put the grit package root on sys.path so the
# absolute 'grit.*' imports below resolve.
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from grit import exception
from grit import tclib
from grit import util
from grit.node import message
class MessageUnittest(unittest.TestCase):
  '''Tests for grit.node.message: parsing, placeholder validation and
  formatting of <message> elements.

  Modernized to use assertTrue/assertEqual/assertRaises: the failUnless*
  aliases are deprecated and were removed in Python 3.12, and
  assertRaises replaces the repetitive try/except/self.fail pattern.
  '''

  def testMessage(self):
    # A <ph> placeholder appears as its (upper-cased) name in the
    # presentable content.
    root = util.ParseGrdForUnittest('''
        <messages>
        <message name="IDS_GREETING"
                 desc="Printed to greet the currently logged in user">
          Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
        </message>
        </messages>''')
    msg, = root.GetChildrenOfType(message.MessageNode)
    cliques = msg.GetCliques()
    content = cliques[0].GetMessage().GetPresentableContent()
    self.assertEqual('Hello USERNAME, how are you doing today?', content)

  def testMessageWithWhitespace(self):
    # Whitespace inside ''' quotes is preserved on the node but stripped
    # from the translateable content.
    root = util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_BLA" desc="">
        ''' Hello there <ph name="USERNAME">%s</ph> '''
        </message>
        </messages>""")
    msg, = root.GetChildrenOfType(message.MessageNode)
    content = msg.GetCliques()[0].GetMessage().GetPresentableContent()
    self.assertEqual('Hello there USERNAME', content)
    self.assertEqual(' ', msg.ws_at_start)
    self.assertEqual(' ', msg.ws_at_end)

  def testConstruct(self):
    # MessageNode.Construct must turn tclib placeholders into <ph>/<ex>
    # children and preserve surrounding whitespace.
    msg = tclib.Message(text=" Hello USERNAME, how are you? BINGO\t\t",
                        placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi'),
                                      tclib.Placeholder('BINGO', '%d', '11')])
    msg_node = message.MessageNode.Construct(None, msg, 'BINGOBONGO')
    self.assertEqual('ph', msg_node.children[0].name)
    self.assertEqual('ex', msg_node.children[0].children[0].name)
    self.assertEqual('Joi', msg_node.children[0].children[0].GetCdata())
    self.assertEqual('11', msg_node.children[1].children[0].GetCdata())
    self.assertEqual(' ', msg_node.ws_at_start)
    self.assertEqual('\t\t', msg_node.ws_at_end)

  def testUnicodeConstruct(self):
    text = u'Howdie \u00fe'
    msg = tclib.Message(text=text)
    msg_node = message.MessageNode.Construct(None, msg, 'BINGOBONGO')
    msg_from_node = msg_node.GetCdata()
    self.assertEqual(text, msg_from_node)

  def testFormatterData(self):
    # formatter_data is a space-separated list of NAME[=VALUE] definitions;
    # names without a value get the empty string.
    root = util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_BLA" desc="" formatter_data=" foo=123 bar qux=low">
          Text
        </message>
        </messages>""")
    msg, = root.GetChildrenOfType(message.MessageNode)
    expected_formatter_data = {
        'foo': '123',
        'bar': '',
        'qux': 'low'}
    # assertEqual on dicts gives a useful diff; the old element-by-element
    # comparison was a Python 2.6 workaround for missing assertDictEqual.
    self.assertEqual(expected_formatter_data, msg.formatter_data)

  def testReplaceEllipsis(self):
    root = util.ParseGrdForUnittest('''
        <messages>
        <message name="IDS_GREETING" desc="">
          A...B.... <ph name="PH">%s<ex>A</ex></ph>... B... C...
        </message>
        </messages>''')
    msg, = root.GetChildrenOfType(message.MessageNode)
    msg.SetReplaceEllipsis(True)
    content = msg.Translate('en')
    # Only a bare "..." at a word end is replaced; "...." and the run after
    # "A" are left alone.
    self.assertEqual(u'A...B.... %s\u2026 B\u2026 C\u2026', content)

  def testRemoveByteOrderMark(self):
    # Byte order marks must be stripped wherever they occur; see
    # https://crbug.com/1033305.
    root = util.ParseGrdForUnittest(u'''
        <messages>
        <message name="IDS_HAS_BOM" desc="">
          \uFEFFThis\uFEFF i\uFEFFs OK\uFEFF
        </message>
        </messages>''')
    msg, = root.GetChildrenOfType(message.MessageNode)
    content = msg.Translate('en')
    self.assertEqual(u'This is OK', content)

  def testPlaceholderHasTooManyExamples(self):
    with self.assertRaises(exception.TooManyExamples):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_FOO" desc="foo">
            Hi <ph name="NAME">$1<ex>Joi</ex><ex>Joy</ex></ph>
          </message>
          </messages>""")

  def testPlaceholderHasInvalidName(self):
    with self.assertRaises(exception.InvalidPlaceholderName):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_FOO" desc="foo">
            Hi <ph name="ABC!">$1</ph>
          </message>
          </messages>""")

  def testChromeLocalizedFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_CHROME_L10N" desc="l10n format">
            This message is missing the ph node: $1
          </message>
          </messages>""")

  def testAndroidStringFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_ANDROID" desc="string format">
            This message is missing a ph node: %1$s
          </message>
          </messages>""")

  def testAndroidIntegerFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_ANDROID" desc="integer format">
            This message is missing a ph node: %2$d
          </message>
          </messages>""")

  def testAndroidIntegerWidthFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_ANDROID" desc="integer width format">
            This message is missing a ph node: %2$3d
          </message>
          </messages>""")

  def testValidAndroidIntegerWidthFormatInPhNode(self):
    # Must parse without raising; an exception fails the test.
    util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_ANDROID_WIDTH">
          <ph name="VALID">%2$3d<ex>042</ex></ph>
        </message>
        </messages>""")

  def testAndroidFloatFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_ANDROID" desc="float number format">
            This message is missing a ph node: %3$4.5f
          </message>
          </messages>""")

  def testGritStringFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_GRIT_STRING" desc="grit string format">
            This message is missing the ph node: %s
          </message>
          </messages>""")

  def testGritIntegerFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_GRIT_INTEGER" desc="grit integer format">
            This message is missing the ph node: %d
          </message>
          </messages>""")

  def testWindowsETWIntegerFormatIsInsidePhNode(self):
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_WINDOWS_ETW" desc="ETW tracing integer">
            This message is missing the ph node: %1
          </message>
          </messages>""")

  def testValidMultipleFormattersInsidePhNodes(self):
    root = util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_MULTIPLE_FORMATTERS">
          <ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
        </message>
        </messages>""")
    msg, = root.GetChildrenOfType(message.MessageNode)
    cliques = msg.GetCliques()
    content = cliques[0].GetMessage().GetPresentableContent()
    self.assertEqual('ERROR_COUNT error, WARNING_COUNT warning', content)

  def testMultipleFormattersAreInsidePhNodes(self):
    # Every formatter must be wrapped: none wrapped, one of two wrapped, and
    # two formatters crammed into a single <ph> are all rejected.
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_MULTIPLE_FORMATTERS">
            %1$d error, %2$d warning
          </message>
          </messages>""")
    with self.assertRaises(exception.PlaceholderNotInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_MULTIPLE_FORMATTERS">
            <ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, %2$d warning
          </message>
          </messages>""")
    with self.assertRaises(exception.InvalidCharactersInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_MULTIPLE_FORMATTERS">
            <ph name="INVALID">%1$d %2$d</ph>
          </message>
          </messages>""")

  def testValidHTMLFormatInsidePhNode(self):
    # HTML tags around a formatter inside <ph> must be accepted.
    util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_HTML">
          <ph name="VALID">&lt;span&gt;$1&lt;/span&gt;<ex>1</ex></ph>
        </message>
        </messages>""")

  def testValidHTMLWithAttributesFormatInsidePhNode(self):
    # HTML attributes containing $ and % characters must be accepted.
    util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_HTML_ATTRIBUTE">
          <ph name="VALID">&lt;span attribute="js:$this %"&gt;$2&lt;/span&gt;<ex>2</ex></ph>
        </message>
        </messages>""")

  def testValidHTMLEntityFormatInsidePhNode(self):
    # HTML entities around a formatter inside <ph> must be accepted.
    util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_ENTITY">
          <ph name="VALID">&gt;%1$d&lt;<ex>1</ex></ph>
        </message>
        </messages>""")

  def testValidMultipleDollarFormatInsidePhNode(self):
    # Escaped-dollar Chrome l10n placeholders ($$1) must be accepted.
    util.ParseGrdForUnittest("""\
        <messages>
        <message name="IDS_DOLLARS" desc="l10n dollars format">
          <ph name="VALID">$$1</ph>
        </message>
        </messages>""")

  def testInvalidDollarCharacterInsidePhNode(self):
    with self.assertRaises(exception.InvalidCharactersInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_BAD_DOLLAR">
            <ph name="INVALID">%1$d $</ph>
          </message>
          </messages>""")

  def testInvalidPercentCharacterInsidePhNode(self):
    with self.assertRaises(exception.InvalidCharactersInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_BAD_PERCENT">
            <ph name="INVALID">%1$d %</ph>
          </message>
          </messages>""")

  def testInvalidMixedFormatCharactersInsidePhNode(self):
    with self.assertRaises(exception.InvalidCharactersInsidePhNode):
      util.ParseGrdForUnittest("""\
          <messages>
          <message name="IDS_MIXED_FORMATS">
            <ph name="INVALID">%1$2</ph>
          </message>
          </messages>""")
# Allow running this unittest file directly as a script.
if __name__ == '__main__':
  unittest.main()

707
third_party/libwebrtc/tools/grit/grit/node/misc.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,707 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Miscellaneous node types.
"""
from __future__ import print_function
import os.path
import re
import sys
import six
from grit import constants
from grit import exception
from grit import util
from grit.extern import FP
from grit.node import base
from grit.node import message
from grit.node import node_io
# Python 3 doesn't have long() as int() works everywhere. But we really do need
# the long() behavior on Python 2 as our ids are much too large for int().
try:
long
except NameError:
long = int
# RTL languages
# TODO(jennyz): remove this fixed set of RTL language array
# now that generic expand_variable code exists.
# NOTE: 'iw' is the legacy ISO-639 code for Hebrew ('he').
_RTL_LANGS = (
    'ar',  # Arabic
    'fa',  # Farsi
    'iw',  # Hebrew
    'ks',  # Kashmiri
    'ku',  # Kurdish
    'ps',  # Pashto
    'ur',  # Urdu
    'yi',  # Yiddish
)
def _ReadFirstIdsFromFile(filename, defines):
  """Read the starting resource id values from |filename|.

  Variables of the form <(FOO) in the file's keys are expanded based on
  |defines| passed in on the command line; unknown variables expand to ''.
  Keys are then rebased: paths under SRCDIR become SRCDIR-relative with
  forward slashes, paths outside SRCDIR are reduced to their basename.

  Args:
    filename: Path to the first_ids file (a Python literal dict).
    defines: Dict of variable name -> replacement string.

  Returns:
    A tuple: the absolute path of SRCDIR followed by the first_ids dictionary.
  """
  # The file is a Python-literal dict maintained alongside the build;
  # eval of this trusted input is the established format here.
  first_ids_dict = eval(util.ReadFile(filename, 'utf-8'))
  src_root_dir = os.path.abspath(os.path.join(os.path.dirname(filename),
                                              first_ids_dict['SRCDIR']))

  def ReplaceVariable(matchobj):
    # Unknown variables expand to the empty string.
    return defines.get(matchobj.group(1), '')

  # Collect renames first; mutating the dict while iterating it is invalid.
  renames = []
  for grd_filename in first_ids_dict:
    new_grd_filename = re.sub(r'<\(([A-Za-z_]+)\)', ReplaceVariable,
                              grd_filename)
    if new_grd_filename != grd_filename:
      abs_grd_filename = os.path.abspath(new_grd_filename)
      if not abs_grd_filename.startswith(src_root_dir):
        # Outside the source tree: key by basename only.
        new_grd_filename = os.path.basename(abs_grd_filename)
      else:
        new_grd_filename = abs_grd_filename[len(src_root_dir) + 1:]
        new_grd_filename = new_grd_filename.replace('\\', '/')
      renames.append((grd_filename, new_grd_filename))

  for grd_filename, new_grd_filename in renames:
    first_ids_dict[new_grd_filename] = first_ids_dict[grd_filename]
    del first_ids_dict[grd_filename]

  return (src_root_dir, first_ids_dict)
def _ComputeIds(root, predetermined_tids):
  """Returns a dict of textual id -> numeric id for all nodes in root.

  IDs are mostly assigned sequentially, but will vary based on:
    * first_id node attribute (from first_ids_file)
    * hash of textual id (if not first_id is defined)
    * offset node attribute
    * whether the textual id matches a system id
    * whether the node generates its own ID via GetId()

  Args:
    root: Root node of the grd tree to walk.
    predetermined_tids: Dict of textual id -> numeric id to use in return dict.

  Raises:
    exception.IdRangeOverlap: if two distinct textual ids collide on the
        same numeric id.
  """
  from grit.node import empty, include, misc, structure

  ids = {}  # Maps numeric id to textual id
  tids = {}  # Maps textual id to numeric id
  id_reasons = {}  # Maps numeric id to text id and a human-readable explanation
  group = None
  last_id = None
  # Inverse of predetermined_tids: numeric id -> textual id.
  predetermined_ids = {value: key
                       for key, value in predetermined_tids.items()}

  for item in root:
    if isinstance(item, empty.GroupingNode):
      # Note: this won't work if any GroupingNode can be contained inside
      # another.
      group = item
      last_id = None
      continue

    assert not item.GetTextualIds() or isinstance(item,
        (include.IncludeNode, message.MessageNode,
         misc.IdentifierNode, structure.StructureNode))

    # Resources that use the RES protocol don't need
    # any numerical ids generated, so we skip them altogether.
    # This is accomplished by setting the flag 'generateid' to false
    # in the GRD file.
    if item.attrs.get('generateid', 'true') == 'false':
      continue

    for tid in item.GetTextualIds():
      if util.SYSTEM_IDENTIFIERS.match(tid):
        # Don't emit a new ID for predefined IDs
        continue

      if tid in tids:
        continue

      if predetermined_tids and tid in predetermined_tids:
        id = predetermined_tids[tid]
        reason = "from predetermined_tids map"

      # Some identifier nodes can provide their own id,
      # and we use that id in the generated header in that case.
      elif hasattr(item, 'GetId') and item.GetId():
        id = long(item.GetId())
        reason = 'returned by GetId() method'

      elif ('offset' in item.attrs and group and
            group.attrs.get('first_id', '') != ''):
        offset_text = item.attrs['offset']
        parent_text = group.attrs['first_id']

        try:
          offset_id = long(offset_text)
        except ValueError:
          offset_id = tids[offset_text]

        try:
          parent_id = long(parent_text)
        except ValueError:
          parent_id = tids[parent_text]

        id = parent_id + offset_id
        reason = 'first_id %d + offset %d' % (parent_id, offset_id)

      # We try to allocate IDs sequentially for blocks of items that might
      # be related, for instance strings in a stringtable (as their IDs might be
      # used e.g. as IDs for some radio buttons, in which case the IDs must
      # be sequential).
      #
      # We do this by having the first item in a section store its computed ID
      # (computed from a fingerprint) in its parent object. Subsequent children
      # of the same parent will then try to get IDs that sequentially follow
      # the currently stored ID (on the parent) and increment it.
      elif last_id is None:
        # First check if the starting ID is explicitly specified by the parent.
        if group and group.attrs.get('first_id', '') != '':
          id = long(group.attrs['first_id'])
          reason = "from parent's first_id attribute"
        else:
          # Automatically generate the ID based on the first clique from the
          # first child of the first child node of our parent (i.e. when we
          # first get to this location in the code).
          # According to
          # http://msdn.microsoft.com/en-us/library/t2zechd4(VS.71).aspx
          # the safe usable range for resource IDs in Windows is from decimal
          # 101 to 0x7FFF.
          id = FP.UnsignedFingerPrint(tid)
          id = id % (0x7FFF - 101) + 101
          reason = 'chosen by random fingerprint -- use first_id to override'

        last_id = id
      else:
        id = last_id = last_id + 1
        reason = 'sequentially assigned'

      reason = "%s (%s)" % (tid, reason)
      # Don't fail when 'offset' is specified, as the base and the 0th
      # offset will have the same ID.
      if id in id_reasons and not 'offset' in item.attrs:
        raise exception.IdRangeOverlap('ID %d was assigned to both %s and %s.'
                                       % (id, id_reasons[id], reason))

      if id < 101:
        print('WARNING: Numeric resource IDs should be greater than 100 to\n'
              'avoid conflicts with system-defined resource IDs.')

      # BUG FIX: |predetermined_ids| is keyed by numeric id, so it must be
      # indexed with |id|, not |tid| -- indexing with the textual id raised
      # a KeyError instead of the intended IdRangeOverlap.
      if tid not in predetermined_tids and id in predetermined_ids:
        raise exception.IdRangeOverlap('ID %d overlaps between %s and %s'
                                       % (id, tid, predetermined_ids[id]))

      ids[id] = tid
      tids[tid] = id
      id_reasons[id] = reason

  return tids
class SplicingNode(base.Node):
  """A node whose children should be considered to be at the same level as
  its siblings for most purposes. This includes <if> and <part> nodes.
  """

  def _IsValidChild(self, child):
    assert self.parent, '<%s> node should never be root.' % self.name
    # Splicing nodes may nest directly; short-circuiting here avoids
    # O(n^2) revalidation through the ancestor chain.
    return isinstance(child, SplicingNode) or self.parent._IsValidChild(child)
class IfNode(SplicingNode):
  """A node for conditional inclusion of resources.
  """

  def MandatoryAttributes(self):
    return ['expr']

  def _IsValidChild(self, child):
    if isinstance(child, (ThenNode, ElseNode)):
      return True
    return super(IfNode, self)._IsValidChild(child)

  def EndParsing(self):
    kids = self.children
    self.if_then_else = False
    has_branch_nodes = any(
        isinstance(node, (ThenNode, ElseNode)) for node in kids)
    if has_branch_nodes:
      # Mixed forms are rejected: once a branch node appears, the only
      # legal shape is exactly one <then> followed by exactly one <else>.
      well_formed = (len(kids) == 2 and isinstance(kids[0], ThenNode)
                     and isinstance(kids[1], ElseNode))
      if not well_formed:
        raise exception.UnexpectedChild(
            '<if> element must be <if><then>...</then><else>...</else></if>')
      self.if_then_else = True

  def ActiveChildren(self):
    cond = self.EvaluateCondition(self.attrs['expr'])
    if self.if_then_else:
      branch = self.children[0] if cond else self.children[1]
      return branch.ActiveChildren()
    # Equivalent to having all children inside <then> with an empty <else>
    return super(IfNode, self).ActiveChildren() if cond else []
class ThenNode(SplicingNode):
  """A <then> node. Can only appear directly inside an <if> node."""
class ElseNode(SplicingNode):
  """An <else> node. Can only appear directly inside an <if> node."""
class PartNode(SplicingNode):
  """A node for inclusion of sub-grd (*.grp) files.
  """

  def __init__(self):
    super(PartNode, self).__init__()
    # Children are only legal once the included file's content is being read.
    self.started_inclusion = False

  def MandatoryAttributes(self):
    return ['file']

  def _IsValidChild(self, child):
    if not self.started_inclusion:
      return False
    return super(PartNode, self)._IsValidChild(child)
class ReleaseNode(base.Node):
  """The <release> element."""

  def _IsValidChild(self, child):
    from grit.node import empty
    allowed = (empty.IncludesNode, empty.MessagesNode,
               empty.StructuresNode, empty.IdentifiersNode)
    return isinstance(child, allowed)

  def _IsValidAttribute(self, name, value):
    if name == 'allow_pseudo':
      return True
    if name != 'seq':
      return False
    # A release's sequence number may not exceed the file's current release.
    return int(value) <= self.GetRoot().GetCurrentRelease()

  def MandatoryAttributes(self):
    return ['seq']

  def DefaultAttributes(self):
    return {'allow_pseudo': 'true'}
class GritNode(base.Node):
  """The <grit> root element."""

  def __init__(self):
    super(GritNode, self).__init__()
    # Current output language code; '' until SetOutputLanguage() is called.
    self.output_language = ''
    # Dict of -D defines passed on the command line.
    self.defines = {}
    # Lazily built util.Substituter; reset to None whenever language,
    # context, layout fallback or defines change.
    self.substituter = None
    self.target_platform = sys.platform
    self.whitelist_support = False
    # Optional path to a file of predetermined "textual_id numeric_id" pairs.
    self._predetermined_ids_file = None
    self._id_map = None # Dict of textual_id -> numeric_id.

  def _IsValidChild(self, child):
    # Only <release>, <translations> and <outputs> may appear directly
    # under the <grit> root.
    from grit.node import empty
    return isinstance(child, (ReleaseNode, empty.TranslationsNode,
                              empty.OutputsNode))

  def _IsValidAttribute(self, name, value):
    # Release numbers must be purely numeric strings.
    if name not in ['base_dir', 'first_ids_file', 'source_lang_id',
                    'latest_public_release', 'current_release',
                    'enc_check', 'tc_project', 'grit_version',
                    'output_all_resource_defines']:
      return False
    if name in ['latest_public_release', 'current_release'] and value.strip(
        '0123456789') != '':
      return False
    return True

  def MandatoryAttributes(self):
    return ['latest_public_release', 'current_release']

  def DefaultAttributes(self):
    return {
      'base_dir' : '.',
      'first_ids_file': '',
      'grit_version': 1,
      'source_lang_id' : 'en',
      'enc_check' : constants.ENCODING_CHECK,
      'tc_project' : 'NEED_TO_SET_tc_project_ATTRIBUTE',
    }

  def EndParsing(self):
    """Validates release ordering, id uniqueness and the encoding check."""
    super(GritNode, self).EndParsing()
    if (int(self.attrs['latest_public_release'])
        > int(self.attrs['current_release'])):
      raise exception.Parsing('latest_public_release cannot have a greater '
                              'value than current_release')

    self.ValidateUniqueIds()

    # Add the encoding check if it's not present (should ensure that it's always
    # present in all .grd files generated by GRIT). If it's present, assert if
    # it's not correct.
    if 'enc_check' not in self.attrs or self.attrs['enc_check'] == '':
      self.attrs['enc_check'] = constants.ENCODING_CHECK
    else:
      assert self.attrs['enc_check'] == constants.ENCODING_CHECK, (
          'Are you sure your .grd file is in the correct encoding (UTF-8)?')

  def ValidateUniqueIds(self):
    """Validate that 'name' attribute is unique in all nodes in this tree
    except for nodes that are children of <if> nodes.
    """
    unique_names = {}
    duplicate_names = []
    # To avoid false positives from mutually exclusive <if> clauses, check
    # against whatever the output condition happens to be right now.
    # TODO(benrg): do something better.
    for node in self.ActiveDescendants():
      if node.attrs.get('generateid', 'true') == 'false':
        continue  # Duplication not relevant in that case

      for node_id in node.GetTextualIds():
        if util.SYSTEM_IDENTIFIERS.match(node_id):
          continue  # predefined IDs are sometimes used more than once

        if node_id in unique_names and node_id not in duplicate_names:
          duplicate_names.append(node_id)
        unique_names[node_id] = 1

    if len(duplicate_names):
      raise exception.DuplicateKey(', '.join(duplicate_names))

  def GetCurrentRelease(self):
    """Returns the current release number."""
    return int(self.attrs['current_release'])

  def GetLatestPublicRelease(self):
    """Returns the latest public release number."""
    return int(self.attrs['latest_public_release'])

  def GetSourceLanguage(self):
    """Returns the language code of the source language."""
    return self.attrs['source_lang_id']

  def GetTcProject(self):
    """Returns the name of this project in the TranslationConsole, or
    'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined."""
    return self.attrs['tc_project']

  def SetOwnDir(self, dir):
    """Informs the 'grit' element of the directory the file it is in resides.
    This allows it to calculate relative paths from the input file, which is
    what we desire (rather than from the current path).

    Args:
      dir: directory of the .grd file, e.g. r'c:/bla'

    Return:
      None
    """
    assert dir
    self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir']))

  def GetBaseDir(self):
    """Returns the base directory, relative to the working directory. To get
    the base directory as set in the .grd file, use GetOriginalBaseDir()
    """
    if hasattr(self, 'base_dir'):
      return self.base_dir
    else:
      return self.GetOriginalBaseDir()

  def GetOriginalBaseDir(self):
    """Returns the base directory, as set in the .grd file.
    """
    return self.attrs['base_dir']

  def IsWhitelistSupportEnabled(self):
    # True when resource whitelisting was requested by the build.
    return self.whitelist_support

  def SetWhitelistSupportEnabled(self, whitelist_support):
    self.whitelist_support = whitelist_support

  def GetInputFiles(self):
    """Returns the list of files that are read to produce the output."""

    # Importing this here avoids a circular dependency in the imports.
    # pylint: disable-msg=C6204
    from grit.node import include
    from grit.node import misc
    from grit.node import structure
    from grit.node import variant

    # Check if the input is required for any output configuration.
    input_files = set()
    # Collect even inactive PartNodes since they affect ID assignments.
    for node in self:
      if isinstance(node, misc.PartNode):
        input_files.add(self.ToRealPath(node.GetInputPath()))

    # The set of input files depends on the output configuration, so try
    # every (language, context, fallback) triple and restore the language
    # afterwards.
    old_output_language = self.output_language
    for lang, ctx, fallback in self.GetConfigurations():
      self.SetOutputLanguage(lang or self.GetSourceLanguage())
      self.SetOutputContext(ctx)
      self.SetFallbackToDefaultLayout(fallback)

      for node in self.ActiveDescendants():
        if isinstance(node, (node_io.FileNode, include.IncludeNode,
                             structure.StructureNode, variant.SkeletonNode)):
          input_path = node.GetInputPath()
          if input_path is not None:
            input_files.add(self.ToRealPath(input_path))

          # If it's a flattened node, grab inlined resources too.
          if ((node.name == 'structure' or node.name == 'include')
              and node.attrs['flattenhtml'] == 'true'):
            if node.name == 'structure':
              node.RunPreSubstitutionGatherer()
            input_files.update(node.GetHtmlResourceFilenames())

    self.SetOutputLanguage(old_output_language)
    return sorted(input_files)

  def GetFirstIdsFile(self):
    """Returns a usable path to the first_ids file, if set, otherwise
    returns None.

    The first_ids_file attribute is by default relative to the
    base_dir of the .grd file, but may be prefixed by GRIT_DIR/,
    which makes it relative to the directory of grit.py
    (e.g. GRIT_DIR/../gritsettings/resource_ids).
    """
    if not self.attrs['first_ids_file']:
      return None

    path = self.attrs['first_ids_file']
    GRIT_DIR_PREFIX = 'GRIT_DIR'
    if (path.startswith(GRIT_DIR_PREFIX)
        and path[len(GRIT_DIR_PREFIX)] in ['/', '\\']):
      return util.PathFromRoot(path[len(GRIT_DIR_PREFIX) + 1:])
    else:
      return self.ToRealPath(path)

  def GetOutputFiles(self):
    """Returns the list of <output> nodes that are descendants of this node's
    <outputs> child and are not enclosed by unsatisfied <if> conditionals.
    """
    for child in self.children:
      if child.name == 'outputs':
        return [node for node in child.ActiveDescendants()
                     if node.name == 'output']
    raise exception.MissingElement()

  def GetConfigurations(self):
    """Returns the distinct (language, context, fallback_to_default_layout)
    triples from the output nodes.
    """
    return set((n.GetLanguage(), n.GetContext(), n.GetFallbackToDefaultLayout())
               for n in self.GetOutputFiles())

  def GetSubstitutionMessages(self):
    """Returns the list of <message sub_variable="true"> nodes."""
    return [n for n in self.ActiveDescendants()
            if isinstance(n, message.MessageNode)
                and n.attrs['sub_variable'] == 'true']

  def SetOutputLanguage(self, output_language):
    """Set the output language. Prepares substitutions.

    The substitutions are reset every time the language is changed.
    They include messages designated as variables, and language codes for html
    and rc files.

    Args:
      output_language: a two-letter language code (eg: 'en', 'ar'...) or ''
    """
    if not output_language:
      # We do not specify the output language for .grh files,
      # so we get an empty string as the default.
      # The value should match grit.clique.MessageClique.source_language.
      output_language = self.GetSourceLanguage()
    if output_language != self.output_language:
      self.output_language = output_language
      self.substituter = None  # force recalculate

  def SetOutputContext(self, output_context):
    # Output context selects between e.g. default/special asset variants.
    self.output_context = output_context
    self.substituter = None  # force recalculate

  def SetFallbackToDefaultLayout(self, fallback_to_default_layout):
    self.fallback_to_default_layout = fallback_to_default_layout
    self.substituter = None  # force recalculate

  def SetDefines(self, defines):
    self.defines = defines
    self.substituter = None  # force recalculate

  def SetTargetPlatform(self, target_platform):
    self.target_platform = target_platform

  def GetSubstituter(self):
    # Built lazily; any setter above invalidates the cached instance.
    if self.substituter is None:
      self.substituter = util.Substituter()
      self.substituter.AddMessages(self.GetSubstitutionMessages(),
                                   self.output_language)
      if self.output_language in _RTL_LANGS:
        direction = 'dir="RTL"'
      else:
        direction = 'dir="LTR"'
      self.substituter.AddSubstitutions({
          'GRITLANGCODE': self.output_language,
          'GRITDIR': direction,
      })
      from grit.format import rc  # avoid circular dep
      rc.RcSubstitutions(self.substituter, self.output_language)
    return self.substituter

  def AssignFirstIds(self, filename_or_stream, defines):
    """Assign first ids to each grouping node based on values from the
    first_ids file (if specified on the <grit> node).
    """
    assert self._id_map is None, 'AssignFirstIds() after InitializeIds()'
    # If the input is a stream, then we're probably in a unit test and
    # should skip this step.
    if not isinstance(filename_or_stream, six.string_types):
      return

    # Nothing to do if the first_ids_filename attribute isn't set.
    first_ids_filename = self.GetFirstIdsFile()
    if not first_ids_filename:
      return

    src_root_dir, first_ids = _ReadFirstIdsFromFile(first_ids_filename,
                                                    defines)
    from grit.node import empty
    for node in self.Preorder():
      if isinstance(node, empty.GroupingNode):
        # Compute this .grd file's path relative to the source root, or its
        # basename if it lives outside the tree; that is the key used in
        # the first_ids file.
        abs_filename = os.path.abspath(filename_or_stream)
        if abs_filename[:len(src_root_dir)] != src_root_dir:
          filename = os.path.basename(filename_or_stream)
        else:
          filename = abs_filename[len(src_root_dir) + 1:]
          filename = filename.replace('\\', '/')

        if node.attrs['first_id'] != '':
          raise Exception(
              "Don't set the first_id attribute when using the first_ids_file "
              "attribute on the <grit> node, update %s instead." %
              first_ids_filename)

        try:
          id_list = first_ids[filename][node.name]
        except KeyError as e:
          print('-' * 78)
          print('Resource id not set for %s (%s)!' % (filename, node.name))
          print('Please update %s to include an entry for %s. See the '
                'comments in resource_ids for information on why you need to '
                'update that file.' % (first_ids_filename, filename))
          print('-' * 78)
          raise e

        try:
          node.attrs['first_id'] = str(id_list.pop(0))
        except IndexError as e:
          raise Exception('Please update %s and add a first id for %s (%s).'
                          % (first_ids_filename, filename, node.name))

  def GetIdMap(self):
    '''Return a dictionary mapping textual ids to numeric ids.'''
    return self._id_map

  def SetPredeterminedIdsFile(self, predetermined_ids_file):
    assert self._id_map is None, (
        'SetPredeterminedIdsFile() after InitializeIds()')
    self._predetermined_ids_file = predetermined_ids_file

  def InitializeIds(self):
    '''Initializes the text ID -> numeric ID mapping.'''
    predetermined_id_map = {}
    if self._predetermined_ids_file:
      # Each line of the file is "<textual_id> <numeric_id>".
      with open(self._predetermined_ids_file) as f:
        for line in f:
          tid, nid = line.split()
          predetermined_id_map[tid] = int(nid)
    self._id_map = _ComputeIds(self, predetermined_id_map)

  def RunGatherers(self, debug=False):
    '''Call RunPreSubstitutionGatherer() on every node of the tree, then apply
    substitutions, then call RunPostSubstitutionGatherer() on every node.

    The substitutions step requires that the output language has been set.
    Locally, get the Substitution messages and add them to the substituter.
    Also add substitutions for language codes in the Rc.

    Args:
      debug: will print information while running gatherers.
    '''
    for node in self.ActiveDescendants():
      if hasattr(node, 'RunPreSubstitutionGatherer'):
        with node:
          node.RunPreSubstitutionGatherer(debug=debug)

    assert self.output_language
    self.SubstituteMessages(self.GetSubstituter())

    for node in self.ActiveDescendants():
      if hasattr(node, 'RunPostSubstitutionGatherer'):
        with node:
          node.RunPostSubstitutionGatherer(debug=debug)
class IdentifierNode(base.Node):
  """A node for specifying identifiers that should appear in the resource
  header file, and be unique amongst all other resource identifiers, but don't
  have any other attributes or reference any resources.
  """

  def MandatoryAttributes(self):
    return ['name']

  def DefaultAttributes(self):
    return {'comment': '', 'id': '', 'systemid': 'false'}

  def GetId(self):
    """Returns the id of this identifier if it has one, None otherwise.
    """
    # attrs.get yields None when the key is absent, matching the old
    # explicit membership test.
    return self.attrs.get('id')

  def EndParsing(self):
    """Handles system identifiers."""
    super(IdentifierNode, self).EndParsing()
    if self.attrs['systemid'] == 'true':
      util.SetupSystemIdentifiers((self.attrs['name'],))

  @staticmethod
  def Construct(parent, name, id, comment, systemid='false'):
    """Creates a new node which is a child of 'parent', with attributes set
    by parameters of the same name.
    """
    node = IdentifierNode()
    node.StartParsing('identifier', parent)
    for attr, value in (('name', name), ('id', id),
                        ('comment', comment), ('systemid', systemid)):
      node.HandleAttribute(attr, value)
    node.EndParsing()
    return node

590
third_party/libwebrtc/tools/grit/grit/node/misc_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,590 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for misc.GritNode'''
from __future__ import print_function
import contextlib
import os
import sys
import tempfile
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from six import StringIO
from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.format import rc_header
from grit.node import misc
@contextlib.contextmanager
def _MakeTempPredeterminedIdsFile(content):
"""Write the |content| string to a temporary file.
The temporary file must be deleted by the caller.
Example:
with _MakeTempPredeterminedIdsFile('foo') as path:
...
os.remove(path)
Args:
content: The string to write.
Yields:
The name of the temporary file.
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
f.write(content)
f.flush()
f.close()
yield f.name
class GritNodeUnittest(unittest.TestCase):
  """Tests for misc.GritNode and its module-level helpers."""

  def testUniqueNameAttribute(self):
    # Duplicate textual names in one active scope must raise Parsing.
    with self.assertRaises(grit.exception.Parsing):
      grd_reader.Parse(
          util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))

  def testReadFirstIdsFromFile(self):
    """_ReadFirstIdsFromFile expands <(VAR) keys and rebases paths."""
    test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
                                     'testdata', 'resource_ids')
    base_dir = os.path.dirname(test_resource_ids)
    src_dir, id_dict = misc._ReadFirstIdsFromFile(
        test_resource_ids,
        {
          'FOO': os.path.join(base_dir, 'bar'),
          'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
                                                  'out/Release/obj/gen'),
        })
    self.assertEqual({}, id_dict.get('bar/file.grd', None))
    self.assertEqual({},
        id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))

    # Keys that resolve outside the source tree fall back to basenames.
    src_dir, id_dict = misc._ReadFirstIdsFromFile(
        test_resource_ids,
        {
          'SHARED_INTERMEDIATE_DIR': '/outside/src_dir',
        })
    self.assertEqual({}, id_dict.get('devtools.grd', None))

  # Verifies that GetInputFiles() returns the correct list of files
  # corresponding to ChromeScaledImage nodes when assets are missing.
  def testGetInputFilesChromeScaledImage(self):
    chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
    xml = '''<?xml version="1.0" encoding="utf-8"?>
      <grit latest_public_release="0" current_release="1">
        <outputs>
          <output filename="default.pak" type="data_package" context="default_100_percent" />
          <output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
        </outputs>
        <release seq="1">
          <structures fallback_to_low_resolution="true">
            <structure type="chrome_scaled_image" name="IDR_A" file="a.png" />
            <structure type="chrome_scaled_image" name="IDR_B" file="b.png" />
            <structure type="chrome_html" name="HTML_FILE1" file="%s" flattenhtml="true" />
          </structures>
        </release>
      </grit>''' % chrome_html_path

    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/testdata'))
    expected = ['chrome_html.html', 'default_100_percent/a.png',
                'default_100_percent/b.png', 'included_sample.html',
                'special_100_percent/a.png']
    actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for
              path in grd.GetInputFiles()]
    # Convert path separator for Windows paths.
    actual = [path.replace('\\', '/') for path in actual]
    # assertEqual replaces the deprecated assertEquals alias (removed in
    # Python 3.12).
    self.assertEqual(expected, actual)

  # Verifies that GetInputFiles() returns the correct list of files
  # when files include other files.
  def testGetInputFilesFromIncludes(self):
    chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
    xml = '''<?xml version="1.0" encoding="utf-8"?>
      <grit latest_public_release="0" current_release="1">
        <outputs>
          <output filename="default.pak" type="data_package" context="default_100_percent" />
          <output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
        </outputs>
        <release seq="1">
          <includes>
            <include name="IDR_TESTDATA_CHROME_HTML" file="%s" flattenhtml="true"
                     allowexternalscript="true" type="BINDATA" />
          </includes>
        </release>
      </grit>''' % chrome_html_path

    grd = grd_reader.Parse(StringIO(xml), util.PathFromRoot('grit/testdata'))
    expected = ['chrome_html.html', 'included_sample.html']
    actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for
              path in grd.GetInputFiles()]
    # Convert path separator for Windows paths.
    actual = [path.replace('\\', '/') for path in actual]
    self.assertEqual(expected, actual)

  def testNonDefaultEntry(self):
    grd = util.ParseGrdForUnittest('''
      <messages>
        <message name="IDS_A" desc="foo">bar</message>
        <if expr="lang == 'fr'">
          <message name="IDS_B" desc="foo">bar</message>
        </if>
      </messages>''')
    grd.SetOutputLanguage('fr')
    output = ''.join(rc_header.Format(grd, 'fr', '.'))
    self.assertIn('#define IDS_A 2378\n#define IDS_B 2379', output)

  def testExplicitFirstIdOverlaps(self):
    # second first_id will overlap preexisting range
    self.assertRaises(grit.exception.IdRangeOverlap,
                      util.ParseGrdForUnittest, '''
      <includes first_id="300" comment="bingo">
        <include type="gif" name="ID_LOGO" file="images/logo.gif" />
        <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
      </includes>
      <messages first_id="301">
        <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
          Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
        </message>
        <message name="IDS_SMURFGEBURF">Frubegfrums</message>
      </messages>''')

  def testImplicitOverlapsPreexisting(self):
    # second message in <messages> will overlap preexisting range
    self.assertRaises(grit.exception.IdRangeOverlap,
                      util.ParseGrdForUnittest, '''
      <includes first_id="301" comment="bingo">
        <include type="gif" name="ID_LOGO" file="images/logo.gif" />
        <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
      </includes>
      <messages first_id="300">
        <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
          Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
        </message>
        <message name="IDS_SMURFGEBURF">Frubegfrums</message>
      </messages>''')

  def testPredeterminedIds(self):
    with _MakeTempPredeterminedIdsFile('IDS_A 101\nIDS_B 102') as ids_file:
      grd = util.ParseGrdForUnittest('''
        <includes first_id="300" comment="bingo">
          <include type="gif" name="IDS_B" file="images/logo.gif" />
        </includes>
        <messages first_id="10000">
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="IDS_A">
            Bongo!
          </message>
        </messages>''', predetermined_ids_file=ids_file)
      output = rc_header.FormatDefines(grd)
      # Predetermined ids win over first_id ranges.
      self.assertEqual(('#define IDS_B 102\n'
                        '#define IDS_GREETING 10000\n'
                        '#define IDS_A 101\n'), ''.join(output))
      os.remove(ids_file)

  def testPredeterminedIdsOverlap(self):
    with _MakeTempPredeterminedIdsFile('ID_LOGO 10000') as ids_file:
      self.assertRaises(grit.exception.IdRangeOverlap,
                        util.ParseGrdForUnittest, '''
        <includes first_id="300" comment="bingo">
          <include type="gif" name="ID_LOGO" file="images/logo.gif" />
        </includes>
        <messages first_id="10000">
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="IDS_BONGO">
            Bongo!
          </message>
        </messages>''', predetermined_ids_file=ids_file)
      os.remove(ids_file)
class IfNodeUnittest(unittest.TestCase):
def testIffyness(self):
grd = grd_reader.Parse(StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">
Bingo!
</message>
</if>
<if expr="'hello' in defs">
<message name="IDS_HELLO">
Hello!
</message>
</if>
<if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
<message name="IDS_HELLO" internal_comment="French version">
Good morning
</message>
</if>
<if expr="is_win">
<message name="IDS_ISWIN">is_win</message>
</if>
</messages>
</release>
</grit>'''), dir='.')
messages_node = grd.children[0].children[0]
bingo_message = messages_node.children[0].children[0]
hello_message = messages_node.children[1].children[0]
french_message = messages_node.children[2].children[0]
is_win_message = messages_node.children[3].children[0]
self.assertTrue(bingo_message.name == 'message')
self.assertTrue(hello_message.name == 'message')
self.assertTrue(french_message.name == 'message')
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message not in active)
self.failUnless(hello_message in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': 1})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message not in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({})
self.failUnless(grd.target_platform == sys.platform)
grd.SetTargetPlatform('darwin')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message not in active)
grd.SetTargetPlatform('win32')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message in active)
  def testElsiness(self):
    '''<if> nodes with <then>/<else> children activate exactly one branch:
    the <then> contents when the expression is true, the <else> contents
    otherwise, including when either branch is empty.'''
    grd = util.ParseGrdForUnittest('''
        <messages>
          <if expr="True">
            <then> <message name="IDS_YES1"></message> </then>
            <else> <message name="IDS_NO1"></message> </else>
          </if>
          <if expr="True">
            <then> <message name="IDS_YES2"></message> </then>
            <else> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> <message name="IDS_NO2"></message> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO3"></message> </then>
            <else> <message name="IDS_YES3"></message> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO4"></message> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> <message name="IDS_YES4"></message> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> </else>
          </if>
        </messages>''')
    # Only the messages from the taken branches should be active.
    included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
                if msg.name == 'message']
    self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'], included)
def testIffynessWithOutputNodes(self):
grd = grd_reader.Parse(StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="uncond1.rc" type="rc_data" />
<if expr="lang == 'fr' or 'hello' in defs">
<output filename="only_fr.adm" type="adm" />
<output filename="only_fr.plist" type="plist" />
</if>
<if expr="lang == 'ru'">
<output filename="doc.html" type="document" />
</if>
<output filename="uncond2.adm" type="adm" />
<output filename="iftest.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
</outputs>
</grit>'''), dir='.')
outputs_node = grd.children[0]
uncond1_output = outputs_node.children[0]
only_fr_adm_output = outputs_node.children[1].children[0]
only_fr_plist_output = outputs_node.children[1].children[1]
doc_output = outputs_node.children[2].children[0]
uncond2_output = outputs_node.children[0]
self.assertTrue(uncond1_output.name == 'output')
self.assertTrue(only_fr_adm_output.name == 'output')
self.assertTrue(only_fr_plist_output.name == 'output')
self.assertTrue(doc_output.name == 'output')
self.assertTrue(uncond2_output.name == 'output')
grd.SetOutputLanguage('ru')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('ru')
grd.SetDefines({'bingo': '2'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
'iftest.h'])
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
  def testChildrenAccepted(self):
    '''An <if> accepts the same children as its parent element, at any
    nesting depth; parsing this .grd must not raise.'''
    grd_reader.Parse(StringIO(r'''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <includes>
              <if expr="'bingo' in defs">
                <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
              </if>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
                </if>
              </if>
            </includes>
            <structures>
              <if expr="'bingo' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
              </if>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
                </if>
              </if>
            </structures>
            <messages>
              <if expr="'bingo' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <message name="IDS_BINGO">Bingo!</message>
                </if>
              </if>
            </messages>
          </release>
          <translations>
            <if expr="'bingo' in defs">
              <file lang="nl" path="nl_translations.xtb" />
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <file lang="nl" path="nl_translations.xtb" />
              </if>
            </if>
          </translations>
        </grit>'''), dir='.')
  def testIfBadChildrenNesting(self):
    '''An <if> must reject children its parent element would not accept,
    raising UnexpectedChild, including when the <if> nodes are nested.'''
    # includes
    xml = StringIO(r'''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <includes>
              <if expr="'bingo' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </includes>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # messages
    xml = StringIO(r'''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <messages>
              <if expr="'bingo' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </messages>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # structures
    xml = StringIO('''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <structures>
              <if expr="'bingo' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </structures>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # translations
    xml = StringIO('''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <translations>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
          </translations>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # same with nesting
    xml = StringIO(r'''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <includes>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
                </if>
              </if>
            </includes>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO(r'''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <messages>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
                </if>
              </if>
            </messages>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO('''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <release seq="3">
            <structures>
              <if expr="'bingo' in defs">
                <if expr="'hello' in defs">
                  <message name="IDS_BINGO">Bingo!</message>
                </if>
              </if>
            </structures>
          </release>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO('''<?xml version="1.0"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <translations>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </if>
          </translations>
        </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
class ReleaseNodeUnittest(unittest.TestCase):
  def testPseudoControl(self):
    '''allow_pseudo="false" on a <release> disables pseudo-translation for
    the nodes in that release only; other releases keep the default.'''
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
        <grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
          <release seq="1" allow_pseudo="false">
            <messages>
              <message name="IDS_HELLO">
                Hello
              </message>
            </messages>
            <structures>
              <structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
            </structures>
          </release>
          <release seq="2">
            <messages>
              <message name="IDS_BINGO">
                Bingo
              </message>
            </messages>
            <structures>
              <structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
            </structures>
          </release>
        </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    hello = grd.GetNodeById('IDS_HELLO')
    aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
    bingo = grd.GetNodeById('IDS_BINGO')
    menu = grd.GetNodeById('IDC_KLONKMENU')
    # Nodes in the allow_pseudo="false" release must refuse pseudo.
    for node in [hello, aboutbox]:
      self.failUnless(not node.PseudoIsAllowed())
    # Nodes in the default release must allow it.
    for node in [bingo, menu]:
      self.failUnless(node.PseudoIsAllowed())
    # TODO(benrg): There was a test here that formatting hello and aboutbox with
    # a pseudo language should fail, but they do not fail and the test was
    # broken and failed to catch it. Fix this.
    # Should not raise an exception since pseudo is allowed
    rc.FormatMessage(bingo, 'xyz-pseudo')
    rc.FormatStructure(menu, 'xyz-pseudo', '.')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()

10
third_party/libwebrtc/tools/grit/grit/node/mock_brotli.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Mock Brotli Executable for testing purposes."""

import sys

# Fixed marker emitted in place of real Brotli output, so tests can tell
# that the "compression" step actually ran.
_MOCK_MESSAGE = 'This has been mock compressed!'

sys.stdout.write(_MOCK_MESSAGE)

117
third_party/libwebrtc/tools/grit/grit/node/node_io.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,117 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The <output> and <file> elements.
'''
from __future__ import print_function
import os
from grit import xtb_reader
from grit.node import base
class FileNode(base.Node):
  '''A <file> element: references an .xtb translation file.'''

  def __init__(self):
    super(FileNode, self).__init__()
    self.re = None
    # Whether RunPostSubstitutionGatherer should actually load the file.
    self.should_load_ = True

  def IsTranslation(self):
    return True

  def GetLang(self):
    '''Returns the language code declared in the 'lang' attribute.'''
    return self.attrs['lang']

  def DisableLoading(self):
    '''Makes RunPostSubstitutionGatherer a no-op for this node.'''
    self.should_load_ = False

  def MandatoryAttributes(self):
    return ['path', 'lang']

  def RunPostSubstitutionGatherer(self, debug=False):
    '''Parses the referenced .xtb file and feeds its translations into the
    uber-clique, verifying the file's language matches the 'lang' attribute.
    '''
    if not self.should_load_:
      return

    root = self.GetRoot()
    defs = getattr(root, 'defines', {})
    target_platform = getattr(root, 'target_platform', '')

    # Use a context manager so the .xtb file handle is always released;
    # the previous code opened it without ever closing it.
    with open(self.ToRealPath(self.GetInputPath()), 'rb') as xtb_file:
      try:
        lang = xtb_reader.Parse(xtb_file,
                                self.UberClique().GenerateXtbParserCallback(
                                    self.attrs['lang'], debug=debug),
                                defs=defs,
                                target_platform=target_platform)
      except:
        print("Exception during parsing of %s" % self.GetInputPath())
        raise
    # Translation console uses non-standard language codes 'iw' and 'no' for
    # Hebrew and Norwegian Bokmal instead of 'he' and 'nb' used in Chrome.
    # Note that some Chrome's .grd still use 'no' instead of 'nb', but 'nb' is
    # always used for generated .pak files.
    ALTERNATIVE_LANG_CODE_MAP = { 'he': 'iw', 'nb': 'no' }
    # .get() (not []) so a mismatched language outside the map triggers the
    # assertion message below instead of raising a bare KeyError.
    assert (lang == self.attrs['lang'] or
            lang == ALTERNATIVE_LANG_CODE_MAP.get(self.attrs['lang'])), (
        'The XTB file you reference must contain messages in the language '
        'specified\nby the \'lang\' attribute.')

  def GetInputPath(self):
    '''Returns the 'path' attribute with environment variables expanded.'''
    return os.path.expandvars(self.attrs['path'])
class OutputNode(base.Node):
  '''An <output> element: one file the .grd build should produce.'''

  def MandatoryAttributes(self):
    '''Attributes that must appear on every <output> element.'''
    return ['filename', 'type']

  def DefaultAttributes(self):
    '''Optional attributes and their defaults.'''
    return {
      'lang' : '', # empty lang indicates all languages
      'language_section' : 'neutral', # defines a language neutral section
      'context' : '',
      'fallback_to_default_layout' : 'true',
    }

  def GetType(self):
    '''Returns the output type, e.g. 'rc_header'.'''
    return self.attrs['type']

  def GetLanguage(self):
    '''Returns the language ID, default 'en'.'''
    return self.attrs['lang']

  def GetContext(self):
    return self.attrs['context']

  def GetFilename(self):
    return self.attrs['filename']

  def GetOutputFilename(self):
    '''Returns the output path, preferring an externally assigned
    output_filename over the 'filename' attribute, with environment
    variables expanded.'''
    raw = getattr(self, 'output_filename', self.attrs['filename'])
    return os.path.expandvars(raw)

  def GetFallbackToDefaultLayout(self):
    '''Whether layout may fall back to the default ('true' by default).'''
    return self.attrs['fallback_to_default_layout'].lower() == 'true'

  def _IsValidChild(self, child):
    # Only <emit> children are meaningful inside <output>.
    return isinstance(child, EmitNode)
class EmitNode(base.ContentNode):
  ''' An <emit> element.'''

  def DefaultAttributes(self):
    return { 'emit_type' : 'prepend'}

  def GetEmitType(self):
    '''Returns the emit_type for this node. Default is 'prepend'.'''
    return self.attrs['emit_type']

182
third_party/libwebrtc/tools/grit/grit/node/node_io_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,182 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for node_io.FileNode'''
from __future__ import print_function
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from six import StringIO
from grit.node import misc
from grit.node import node_io
from grit.node import empty
from grit import grd_reader
from grit import util
def _GetAllCliques(root_node):
"""Return all cliques in the |root_node| tree."""
ret = []
for node in root_node:
ret.extend(node.GetCliques())
return ret
class FileNodeUnittest(unittest.TestCase):
  def testGetPath(self):
    '''FileNode.GetInputPath() resolves relative to the grit base_dir,
    with backslashes normalized.'''
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', r'..\resource')
    translations = empty.TranslationsNode()
    translations.StartParsing(u'translations', root)
    root.AddChild(translations)
    file_node = node_io.FileNode()
    file_node.StartParsing(u'file', translations)
    file_node.HandleAttribute(u'path', r'flugel\kugel.pdf')
    translations.AddChild(file_node)
    root.EndParsing()

    self.failUnless(root.ToRealPath(file_node.GetInputPath()) ==
                    util.normpath(
                      os.path.join(r'../resource', r'flugel/kugel.pdf')))

  def VerifyCliquesContainEnglishAndFrenchAndNothingElse(self, cliques):
    '''Helper: asserts exactly two cliques, each holding only en and fr.'''
    self.assertEqual(2, len(cliques))
    for clique in cliques:
      self.assertEqual({'en', 'fr'}, set(clique.clique.keys()))

  def testLoadTranslations(self):
    '''An unconditional <file> loads its French translations alongside the
    source English messages.'''
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <file path="generated_resources_fr.xtb" lang="fr" />
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testIffyness(self):
    '''A <file> inside <if expr="lang == 'fr'"> only loads translations when
    the output language is French.'''
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <translations>
            <if expr="lang == 'fr'">
              <file path="generated_resources_fr.xtb" lang="fr" />
            </if>
          </translations>
          <release seq="3">
            <messages>
              <message name="ID_HELLO">Hello!</message>
              <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
            </messages>
          </release>
        </grit>'''), util.PathFromRoot('grit/testdata'))
    # With 'en' the <if> is inactive: only English in the cliques.
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    cliques = _GetAllCliques(grd)
    self.assertEqual(2, len(cliques))
    for clique in cliques:
      self.assertEqual({'en'}, set(clique.clique.keys()))

    # With 'fr' the translations get loaded.
    grd.SetOutputLanguage('fr')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testConditionalLoadTranslations(self):
    '''A <file> under <if expr="False"> is never opened, so a nonexistent
    path there must not break gathering.'''
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3"
            base_dir=".">
        <translations>
          <if expr="True">
            <file path="generated_resources_fr.xtb" lang="fr" />
          </if>
          <if expr="False">
            <file path="no_such_file.xtb" lang="de" />
          </if>
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
              Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testConditionalOutput(self):
    '''An <output> under a false <if> condition is parsed but inactive.'''
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3"
            base_dir=".">
        <outputs>
          <output filename="resource.h" type="rc_header" />
          <output filename="en/generated_resources.rc" type="rc_all"
                  lang="en" />
          <if expr="pp_if('NOT_TRUE')">
            <output filename="de/generated_resources.rc" type="rc_all"
                    lang="de" />
          </if>
        </outputs>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/test/data'),
                           defines={})
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    outputs = grd.GetChildrenOfType(node_io.OutputNode)
    active = set(grd.ActiveDescendants())
    self.failUnless(outputs[0] in active)
    self.failUnless(outputs[0].GetType() == 'rc_header')
    self.failUnless(outputs[1] in active)
    self.failUnless(outputs[1].GetType() == 'rc_all')
    # The German output sits under pp_if('NOT_TRUE') and must be inactive.
    self.failUnless(outputs[2] not in active)
    self.failUnless(outputs[2].GetType() == 'rc_all')

  # Verify that 'iw' and 'no' language codes in xtb files are mapped to 'he' and
  # 'nb'.
  def testLangCodeMapping(self):
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
        <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
          <translations>
            <file path="generated_resources_no.xtb" lang="nb" />
            <file path="generated_resources_iw.xtb" lang="he" />
          </translations>
          <release seq="3">
            <messages></messages>
          </release>
        </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    # No messages are defined, so no cliques should result; the point is
    # that the lang-code mismatch assertions in FileNode did not fire.
    self.assertEqual([], _GetAllCliques(grd))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()

375
third_party/libwebrtc/tools/grit/grit/node/structure.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,375 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The <structure> element.
'''
from __future__ import print_function
import os
import platform
import re
from grit import exception
from grit import util
from grit.node import base
from grit.node import variant
import grit.gather.admin_template
import grit.gather.chrome_html
import grit.gather.chrome_scaled_image
import grit.gather.policy_json
import grit.gather.rc
import grit.gather.tr_html
import grit.gather.txt
import grit.format.rc
# Type of the gatherer to use for each type attribute
# (maps a <structure type="..."> value to the gatherer class that parses
# files of that type).
_GATHERERS = {
  'accelerators'        : grit.gather.rc.Accelerators,
  'admin_template'      : grit.gather.admin_template.AdmGatherer,
  'chrome_html'         : grit.gather.chrome_html.ChromeHtml,
  'chrome_scaled_image' : grit.gather.chrome_scaled_image.ChromeScaledImage,
  'dialog'              : grit.gather.rc.Dialog,
  'menu'                : grit.gather.rc.Menu,
  'rcdata'              : grit.gather.rc.RCData,
  'tr_html'             : grit.gather.tr_html.TrHtml,
  'txt'                 : grit.gather.txt.TxtFile,
  'version'             : grit.gather.rc.Version,
  'policy_template_metafile' : grit.gather.policy_json.PolicyJson,
}
# TODO(joi) Print a warning if the 'variant_of_revision' attribute indicates
# that a skeleton variant is older than the original file.
class StructureNode(base.Node):
  '''A <structure> element.'''

  # Regular expression for a local variable definition. Each definition
  # is of the form NAME=VALUE, where NAME cannot contain '=' or ',' and
  # VALUE must escape all commas: ',' -> ',,'. Each variable definition
  # should be separated by a comma with no extra whitespace.
  # Example: THING1=foo,THING2=bar
  variable_pattern = re.compile(r'([^,=\s]+)=((?:,,|[^,])*)')

  def __init__(self):
    super(StructureNode, self).__init__()

    # Keep track of the last filename we flattened to, so we can
    # avoid doing it more than once.
    self._last_flat_filename = None

    # See _Substitute; this substituter is used for local variables and
    # the root substituter is used for global variables.
    self.substituter = None

  def _IsValidChild(self, child):
    # Only <skeleton> variants may appear inside a <structure>.
    return isinstance(child, variant.SkeletonNode)

  def _ParseVariables(self, variables):
    '''Parse a variable string into a dictionary.'''
    matches = StructureNode.variable_pattern.findall(variables)
    return dict((name, value.replace(',,', ',')) for name, value in matches)

  def EndParsing(self):
    '''Instantiates the main and skeleton gatherers once attributes and
    children are known.'''
    super(StructureNode, self).EndParsing()

    # Now that we have attributes and children, instantiate the gatherers.
    gathertype = _GATHERERS[self.attrs['type']]

    self.gatherer = gathertype(self.attrs['file'],
                               self.attrs['name'],
                               self.attrs['encoding'])
    self.gatherer.SetGrdNode(self)
    self.gatherer.SetUberClique(self.UberClique())
    if hasattr(self.GetRoot(), 'defines'):
      self.gatherer.SetDefines(self.GetRoot().defines)
    self.gatherer.SetAttributes(self.attrs)
    if self.ExpandVariables():
      self.gatherer.SetFilenameExpansionFunction(self._Substitute)

    # Parse local variables and instantiate the substituter.
    if self.attrs['variables']:
      variables = self.attrs['variables']
      self.substituter = util.Substituter()
      self.substituter.AddSubstitutions(self._ParseVariables(variables))

    self.skeletons = {}  # Maps expressions to skeleton gatherers
    for child in self.children:
      assert isinstance(child, variant.SkeletonNode)
      skel = gathertype(child.attrs['file'],
                        self.attrs['name'],
                        child.GetEncodingToUse(),
                        is_skeleton=True)
      skel.SetGrdNode(self)  # TODO(benrg): Or child? Only used for ToRealPath
      skel.SetUberClique(self.UberClique())
      if hasattr(self.GetRoot(), 'defines'):
        skel.SetDefines(self.GetRoot().defines)
      if self.ExpandVariables():
        skel.SetFilenameExpansionFunction(self._Substitute)
      self.skeletons[child.attrs['expr']] = skel

  def MandatoryAttributes(self):
    return ['type', 'name', 'file']

  def DefaultAttributes(self):
    '''Optional attributes and their defaults.'''
    return {
        'encoding': 'cp1252',
        'exclude_from_rc': 'false',
        'line_end': 'unix',
        'output_encoding': 'utf-8',
        'generateid': 'true',
        'expand_variables': 'false',
        'output_filename': '',
        'fold_whitespace': 'false',
        # Run an arbitrary command after translation is complete
        # so that it doesn't interfere with what's in translation
        # console.
        'run_command': '',
        # Leave empty to run on all platforms, comma-separated
        # for one or more specific platforms. Values must match
        # output of platform.system().
        'run_command_on_platforms': '',
        'allowexternalscript': 'false',
        # preprocess takes the same code path as flattenhtml, but it
        # disables any processing/inlining outside of <if> and <include>.
        'preprocess': 'false',
        'flattenhtml': 'false',
        'fallback_to_low_resolution': 'default',
        'variables': '',
        'compress': 'default',
        'use_base_dir': 'true',
    }

  def IsExcludedFromRc(self):
    return self.attrs['exclude_from_rc'] == 'true'

  def Process(self, output_dir):
    """Writes the processed data to output_dir. In the case of a chrome_html
    structure this will add references to other scale factors. If flattening
    this will also write file references to be base64 encoded data URLs. The
    name of the new file is returned."""
    filename = self.ToRealPath(self.GetInputPath())
    flat_filename = os.path.join(output_dir,
        self.attrs['name'] + '_' + os.path.basename(filename))

    # Skip the work (and return None) if we already flattened to this file.
    if self._last_flat_filename == flat_filename:
      return

    with open(flat_filename, 'wb') as outfile:
      if self.ExpandVariables():
        text = self.gatherer.GetText()
        file_contents = self._Substitute(text)
      else:
        file_contents = self.gatherer.GetData('', 'utf-8')
      outfile.write(file_contents.encode('utf-8'))

    self._last_flat_filename = flat_filename
    return os.path.basename(flat_filename)

  def GetLineEnd(self):
    '''Returns the end-of-line character or characters for files output because
    of this node ('\r\n', '\n', or '\r' depending on the 'line_end' attribute).
    '''
    if self.attrs['line_end'] == 'unix':
      return '\n'
    elif self.attrs['line_end'] == 'windows':
      return '\r\n'
    elif self.attrs['line_end'] == 'mac':
      return '\r'
    else:
      raise exception.UnexpectedAttribute(
          "Attribute 'line_end' must be one of 'unix' (default), 'windows' or "
          "'mac'")

  def GetCliques(self):
    '''Returns the translation cliques collected by this node's gatherer.'''
    return self.gatherer.GetCliques()

  def GetDataPackValue(self, lang, encoding):
    """Returns a bytes representation for a data_pack entry."""
    if self.ExpandVariables():
      text = self.gatherer.GetText()
      data = util.Encode(self._Substitute(text), encoding)
    else:
      data = self.gatherer.GetData(lang, encoding)
    if encoding != util.BINARY:
      data = data.encode(encoding)
    return self.CompressDataIfNeeded(data)

  def GetHtmlResourceFilenames(self):
    """Returns a set of all filenames inlined by this node."""
    return self.gatherer.GetHtmlResourceFilenames()

  def GetInputPath(self):
    '''Returns the gatherer's input path, rebased against base_dir when
    use_base_dir is disabled. May be None if the gatherer has no input.'''
    path = self.gatherer.GetInputPath()
    if path is None:
      return path

    # Do not mess with absolute paths, that would make them invalid.
    if os.path.isabs(os.path.expandvars(path)):
      return path

    # We have no control over code that calls ToRealPath later, so convert
    # the path to be relative against our basedir.
    if self.attrs.get('use_base_dir', 'true') != 'true':
      # Normalize the directory path to use the appropriate OS separator.
      # GetBaseDir() may return paths\like\this or paths/like/this, since it is
      # read from the base_dir attribute in the grd file.
      norm_base_dir = util.normpath(self.GetRoot().GetBaseDir())
      return os.path.relpath(path, norm_base_dir)

    return path

  def GetTextualIds(self):
    if not hasattr(self, 'gatherer'):
      # This case is needed because this method is called by
      # GritNode.ValidateUniqueIds before RunGatherers has been called.
      # TODO(benrg): Fix this?
      return [self.attrs['name']]
    return self.gatherer.GetTextualIds()

  def RunPreSubstitutionGatherer(self, debug=False):
    '''Parses the input file(s) via the main and skeleton gatherers.'''
    if debug:
      print('Running gatherer %s for file %s' %
            (type(self.gatherer), self.GetInputPath()))

    # Note: Parse() is idempotent, therefore this method is also.
    self.gatherer.Parse()
    for skel in self.skeletons.values():
      skel.Parse()

  def GetSkeletonGatherer(self):
    '''Returns the gatherer for the alternate skeleton that should be used,
    based on the expressions for selecting skeletons, or None if the skeleton
    from the English version of the structure should be used.
    '''
    for expr in self.skeletons:
      if self.EvaluateCondition(expr):
        return self.skeletons[expr]
    return None

  def HasFileForLanguage(self):
    # Only these types produce per-language output files.
    return self.attrs['type'] in ['tr_html', 'admin_template', 'txt',
                                  'chrome_scaled_image',
                                  'chrome_html']

  def ExpandVariables(self):
    '''Variable expansion on structures is controlled by an XML attribute.

    However, old files assume that expansion is always on for Rc files.

    Returns:
      A boolean.
    '''
    attrs = self.GetRoot().attrs
    if 'grit_version' in attrs and attrs['grit_version'] > 1:
      return self.attrs['expand_variables'] == 'true'
    else:
      return (self.attrs['expand_variables'] == 'true' or
              self.attrs['file'].lower().endswith('.rc'))

  def _Substitute(self, text):
    '''Perform local and global variable substitution.'''
    if self.substituter:
      text = self.substituter.Substitute(text)
    return self.GetRoot().GetSubstituter().Substitute(text)

  def RunCommandOnCurrentPlatform(self):
    '''Whether run_command should execute here, per
    run_command_on_platforms (empty means all platforms).'''
    if self.attrs['run_command_on_platforms'] == '':
      return True
    else:
      target_platforms = self.attrs['run_command_on_platforms'].split(',')
      return platform.system() in target_platforms

  def FileForLanguage(self, lang, output_dir, create_file=True,
                      return_if_not_generated=True):
    '''Returns the filename of the file associated with this structure,
    for the specified language.

    Args:
      lang: 'fr'
      output_dir: 'c:\temp'
      create_file: True
    '''
    assert self.HasFileForLanguage()
    # If the source language is requested, and no extra changes are requested,
    # use the existing file.
    if ((not lang or lang == self.GetRoot().GetSourceLanguage()) and
        self.attrs['expand_variables'] != 'true' and
        (not self.attrs['run_command'] or
         not self.RunCommandOnCurrentPlatform())):
      if return_if_not_generated:
        input_path = self.GetInputPath()
        if input_path is None:
          return None
        return self.ToRealPath(input_path)
      else:
        return None

    if self.attrs['output_filename'] != '':
      filename = self.attrs['output_filename']
    else:
      filename = os.path.basename(self.attrs['file'])
    assert len(filename)
    filename = '%s_%s' % (lang, filename)
    filename = os.path.join(output_dir, filename)

    # Only create the output if it was requested by the call.
    if create_file:
      text = self.gatherer.Translate(
          lang,
          pseudo_if_not_available=self.PseudoIsAllowed(),
          fallback_to_english=self.ShouldFallbackToEnglish(),
          skeleton_gatherer=self.GetSkeletonGatherer())

      file_contents = util.FixLineEnd(text, self.GetLineEnd())
      if self.ExpandVariables():
        # Note that we reapply substitution a second time here.
        # This is because a) we need to look inside placeholders
        # b) the substitution values are language-dependent
        file_contents = self._Substitute(file_contents)

      with open(filename, 'wb') as file_object:
        output_stream = util.WrapOutputStream(file_object,
                                              self.attrs['output_encoding'])
        output_stream.write(file_contents)

      if self.attrs['run_command'] and self.RunCommandOnCurrentPlatform():
        # Run arbitrary commands after translation is complete so that it
        # doesn't interfere with what's in translation console.
        command = self.attrs['run_command'] % {'filename': filename}
        result = os.system(command)
        assert result == 0, '"%s" failed.' % command

    return filename

  def IsResourceMapSource(self):
    return True

  @staticmethod
  def Construct(parent, name, type, file, encoding='cp1252'):
    '''Creates a new node which is a child of 'parent', with attributes set
    by parameters of the same name.
    '''
    node = StructureNode()
    node.StartParsing('structure', parent)
    node.HandleAttribute('name', name)
    node.HandleAttribute('type', type)
    node.HandleAttribute('file', file)
    node.HandleAttribute('encoding', encoding)
    node.EndParsing()
    return node

  def SubstituteMessages(self, substituter):
    '''Propagates substitution to gatherer.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    assert hasattr(self, 'gatherer')
    if self.ExpandVariables():
      self.gatherer.SubstituteMessages(substituter)

178
third_party/libwebrtc/tools/grit/grit/node/structure_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,178 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for <structure> nodes.
'''
from __future__ import print_function
import os
import os.path
import sys
import zlib
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import platform
import tempfile
import unittest
import struct
from grit import constants
from grit import util
from grit.node import brotli_util
from grit.node import structure
from grit.format import rc
def checkIsGzipped(filename, compress_attr):
  """Returns True iff the data-pack value produced for |filename| (with the
  given compression attribute) gzip-decompresses back to the file's bytes."""
  testdata_dir = util.PathFromRoot('grit/testdata')
  grd_root = util.ParseGrdForUnittest(
      '''
      <structures>
        <structure name="TEST_TXT" file="%s" %s type="chrome_html"/>
      </structures>''' % (filename, compress_attr),
      base_dir=testdata_dir)
  structure_node, = grd_root.GetChildrenOfType(structure.StructureNode)
  structure_node.RunPreSubstitutionGatherer()
  packed = structure_node.GetDataPackValue(lang='en', encoding=util.BINARY)
  # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
  unpacked = zlib.decompress(packed, 16 + zlib.MAX_WBITS)
  original = util.ReadFile(os.path.join(testdata_dir, filename), util.BINARY)
  return original == unpacked
class StructureUnittest(unittest.TestCase):
  def testSkeleton(self):
    '''A <skeleton> whose expression matches the output language supplies
    the alternate skeleton used when formatting the structure.'''
    grd = util.ParseGrdForUnittest('''
        <structures>
          <structure type="dialog" name="IDD_ABOUTBOX" file="klonk.rc" encoding="utf-16-le">
            <skeleton expr="lang == 'fr'" variant_of_revision="1" file="klonk-alternate-skeleton.rc" />
          </structure>
        </structures>''', base_dir=util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('fr')
    grd.RunGatherers()
    transl = ''.join(rc.Format(grd, 'fr', '.'))
    # Markers from the alternate skeleton and the translation must appear.
    self.failUnless(transl.count('040704') and transl.count('110978'))
    self.failUnless(transl.count('2005",IDC_STATIC'))
def testRunCommandOnCurrentPlatform(self):
node = structure.StructureNode()
node.attrs = node.DefaultAttributes()
self.failUnless(node.RunCommandOnCurrentPlatform())
node.attrs['run_command_on_platforms'] = 'Nosuch'
self.failIf(node.RunCommandOnCurrentPlatform())
node.attrs['run_command_on_platforms'] = (
'Nosuch,%s,Othernot' % platform.system())
self.failUnless(node.RunCommandOnCurrentPlatform())
def testVariables(self):
grd = util.ParseGrdForUnittest('''
<structures>
<structure type="chrome_html" name="hello_tmpl" file="structure_variables.html" expand_variables="true" variables="GREETING=Hello,THINGS=foo,, bar,, baz,EQUATION=2+2==4,filename=simple" flattenhtml="true"></structure>
</structures>''', base_dir=util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
node, = grd.GetChildrenOfType(structure.StructureNode)
filename = node.Process(tempfile.gettempdir())
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath) as f:
result = f.read()
self.failUnlessEqual(('<h1>Hello!</h1>\n'
'Some cool things are foo, bar, baz.\n'
'Did you know that 2+2==4?\n'
'<p>\n'
' Hello!\n'
'</p>\n'), result)
os.remove(filepath)
def testGetPath(self):
base_dir = util.PathFromRoot('grit/testdata')
grd = util.ParseGrdForUnittest('''
<structures>
<structure type="chrome_html" name="hello_tmpl" file="structure_variables.html" expand_variables="true" variables="GREETING=Hello,THINGS=foo,, bar,, baz,EQUATION=2+2==4,filename=simple" flattenhtml="true" use_base_dir="true"></structure>
</structures>''', base_dir)
grd.SetOutputLanguage('en')
grd.RunGatherers()
node, = grd.GetChildrenOfType(structure.StructureNode)
self.assertEqual(grd.ToRealPath(node.GetInputPath()),
os.path.abspath(os.path.join(
base_dir, r'structure_variables.html')))
def testGetPathNoBasedir(self):
base_dir = util.PathFromRoot('grit/testdata')
abs_path = os.path.join(base_dir, r'structure_variables.html')
rel_path = os.path.relpath(abs_path, os.getcwd())
grd = util.ParseGrdForUnittest('''
<structures>
<structure type="chrome_html" name="hello_tmpl" file="''' + rel_path + '''" expand_variables="true" variables="GREETING=Hello,THINGS=foo,, bar,, baz,EQUATION=2+2==4,filename=simple" flattenhtml="true" use_base_dir="false"></structure>
</structures>''', util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
node, = grd.GetChildrenOfType(structure.StructureNode)
self.assertEqual(grd.ToRealPath(node.GetInputPath()),
os.path.abspath(os.path.join(
base_dir, r'structure_variables.html')))
def testCompressGzip(self):
self.assertTrue(checkIsGzipped('test_text.txt', 'compress="gzip"'))
def testCompressGzipByDefault(self):
self.assertTrue(checkIsGzipped('test_html.html', ''))
self.assertTrue(checkIsGzipped('test_js.js', ''))
self.assertTrue(checkIsGzipped('test_css.css', ''))
self.assertTrue(checkIsGzipped('test_svg.svg', ''))
self.assertTrue(checkIsGzipped('test_html.html', 'compress="default"'))
self.assertTrue(checkIsGzipped('test_js.js', 'compress="default"'))
self.assertTrue(checkIsGzipped('test_css.css', 'compress="default"'))
self.assertTrue(checkIsGzipped('test_svg.svg', 'compress="default"'))
def testCompressBrotli(self):
test_data_root = util.PathFromRoot('grit/testdata')
root = util.ParseGrdForUnittest(
'''
<structures>
<structure name="TEST_TXT" file="test_text.txt"
compress="brotli" type="chrome_html" />
</structures>''',
base_dir=test_data_root)
node, = root.GetChildrenOfType(structure.StructureNode)
node.RunPreSubstitutionGatherer()
# Using the mock brotli decompression executable.
brotli_util.SetBrotliCommand([sys.executable,
os.path.join(os.path.dirname(__file__),
'mock_brotli.py')])
compressed = node.GetDataPackValue(lang='en', encoding=util.BINARY)
# Assert that the first two bytes in compressed format is BROTLI_CONST.
self.assertEqual(constants.BROTLI_CONST, compressed[0:2])
# Compare the actual size of the uncompressed test data with
# the size appended during compression.
actual_size = len(util.ReadFile(
os.path.join(test_data_root, 'test_text.txt'), util.BINARY))
uncompress_size = struct.unpack('<i', compressed[2:6])[0]
uncompress_size += struct.unpack('<h', compressed[6:8])[0] << 4*8
self.assertEqual(actual_size, uncompress_size)
self.assertEqual(b'This has been mock compressed!', compressed[8:])
def testNotCompressed(self):
test_data_root = util.PathFromRoot('grit/testdata')
root = util.ParseGrdForUnittest('''
<structures>
<structure name="TEST_TXT" file="test_text.txt" type="chrome_html" />
</structures>''', base_dir=test_data_root)
node, = root.GetChildrenOfType(structure.StructureNode)
node.RunPreSubstitutionGatherer()
data = node.GetDataPackValue(lang='en', encoding=util.BINARY)
self.assertEqual(util.ReadFile(
os.path.join(test_data_root, 'test_text.txt'), util.BINARY), data)
if __name__ == '__main__':
unittest.main()

41
third_party/libwebrtc/tools/grit/grit/node/variant.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,41 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The <skeleton> element.
'''
from __future__ import print_function
from grit.node import base
class SkeletonNode(base.Node):
  '''A <skeleton> element: an alternate file used for some languages.'''

  # TODO(joi) Support inline skeleton variants as CDATA instead of requiring
  # a 'file' attribute.

  def MandatoryAttributes(self):
    return ['expr', 'variant_of_revision', 'file']

  def DefaultAttributes(self):
    '''If not specified, 'encoding' will actually default to the parent node's
    encoding.
    '''
    return {'encoding': ''}

  def _ContentType(self):
    # A file-based skeleton carries no inline content; otherwise the
    # variant text is supplied as CDATA.
    if 'file' not in self.attrs:
      return self._CONTENT_TYPE_CDATA
    return self._CONTENT_TYPE_NONE

  def GetEncodingToUse(self):
    '''Returns this node's encoding, falling back to the parent's when the
    attribute was left empty.'''
    encoding = self.attrs['encoding']
    return encoding if encoding != '' else self.parent.attrs['encoding']

  def GetInputPath(self):
    return self.attrs['file']

129
third_party/libwebrtc/tools/grit/grit/pseudo.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,129 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Pseudotranslation support. Our pseudotranslations are based on the
P-language, which is a simple vowel-extending language. Examples of P:
- "hello" becomes "hepellopo"
- "howdie" becomes "hopowdiepie"
- "because" becomes "bepecaupause" (but in our implementation we don't
handle the silent e at the end so it actually would return "bepecaupausepe"
The P-language has the excellent quality of increasing the length of text
by around 30-50% which is great for pseudotranslations, to stress test any
GUI layouts etc.
To make the pseudotranslations more obviously "not a translation" and to make
them exercise any code that deals with encodings, we also transform all English
vowels into equivalent vowels with diacriticals on them (rings, acutes,
diaresis, and circumflex), and we write the "p" in the P-language as a Hebrew
character Qof. It looks sort of like a latin character "p" but it is outside
the latin-1 character set which will stress character encoding bugs.
'''
from __future__ import print_function
from grit import lazy_re
from grit import tclib
# An RFC language code for the P pseudolanguage.
PSEUDO_LANG = 'x-P-pseudo'

# Hebrew character Qof.  It looks kind of like a 'p' but is outside
# the latin-1 character set which is good for our purposes.
# TODO(joi) For now using P instead of Qof, because of some bugs it used.  Find
# a better solution, i.e. one that introduces a non-latin1 character into the
# pseudotranslation.
#_QOF = u'\u05e7'
_QOF = u'P'

# How we map each vowel.
_VOWELS = {
  u'a' : u'\u00e5',  # a with ring
  u'e' : u'\u00e9',  # e acute
  u'i' : u'\u00ef',  # i diaresis
  u'o' : u'\u00f4',  # o circumflex
  u'u' : u'\u00fc',  # u diaresis
  u'y' : u'\u00fd',  # y acute
  u'A' : u'\u00c5',  # A with ring
  u'E' : u'\u00c9',  # E acute
  u'I' : u'\u00cf',  # I diaresis
  u'O' : u'\u00d4',  # O circumflex
  u'U' : u'\u00dc',  # U diaresis
  u'Y' : u'\u00dd',  # Y acute
}
# The vowel characters, precomputed as a set for fast membership tests.
_VOWELS_KEYS = set(_VOWELS.keys())

# Matches vowels and P (the uppercase _QOF stand-in) — a single
# capturing group so re.sub sees each matched character.
_PSUB_RE = lazy_re.compile("(%s)" % '|'.join(_VOWELS_KEYS | {'P'}))

# Pseudotranslations previously created.  This is important for performance
# reasons, especially since we routinely pseudotranslate the whole project
# several or many different times for each build.
_existing_translations = {}
def MapVowels(str, also_p = False):
  '''Returns a copy of 'str' where characters that exist as keys in _VOWELS
  have been replaced with the corresponding value.  If also_p is true, this
  function will also change capital P characters into a Hebrew character Qof.
  '''
  def Repl(match):
    # Bug fix: _PSUB_RE only ever matches the vowels in _VOWELS plus the
    # capital letter 'P'.  The original comparison against lowercase 'p'
    # could never be true, so a matched 'P' fell through to the _VOWELS
    # lookup and raised KeyError.
    if match.group() == 'P':
      if also_p:
        return _QOF
      else:
        return 'P'
    else:
      return _VOWELS[match.group()]
  return _PSUB_RE.sub(Repl, str)
def PseudoString(str):
  '''Returns a pseudotranslation of the provided string, in our enhanced
  P-language.'''
  cached = _existing_translations.get(str)
  if cached is not None:
    return cached

  pieces = []
  pos = 0
  length = len(str)
  while pos < length:
    if str[pos] not in _VOWELS_KEYS:
      # Non-vowel characters pass through unchanged.
      pieces.append(str[pos])
      pos += 1
    else:
      # We want to treat consecutive vowels as one composite vowel.  This is
      # not always accurate e.g. in composite words but good enough.
      start = pos
      while pos < length and str[pos] in _VOWELS_KEYS:
        pos += 1
      mapped = MapVowels(str[start:pos])
      # P-language: vowel cluster -> cluster + QOF + cluster.
      pieces.append(mapped + _QOF + mapped)

  result = u''.join(pieces)
  _existing_translations[str] = result
  return result
def PseudoMessage(message):
  '''Returns a pseudotranslation of the provided message.

  Args:
    message: tclib.Message()

  Return:
    tclib.Translation()
  '''
  result = tclib.Translation()
  for chunk in message.GetContent():
    # Placeholders are preserved verbatim; only text is pseudotranslated.
    if isinstance(chunk, tclib.Placeholder):
      result.AppendPlaceholder(chunk)
    else:
      result.AppendText(PseudoString(chunk))
  return result

104
third_party/libwebrtc/tools/grit/grit/pseudo_rtl.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,104 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Pseudo RTL, (aka Fake Bidi) support. It simply wraps each word with
Unicode RTL overrides.
More info at https://sites.google.com/a/chromium.org/dev/Home/fake-bidi
'''
from __future__ import print_function
import re
from grit import lazy_re
from grit import tclib
# Maps plain characters to visually similar accented replacements used to
# make the fake-bidi text obviously "not a real translation".
ACCENTED_STRINGS = {
  'a': u"\u00e5", 'e': u"\u00e9", 'i': u"\u00ee", 'o': u"\u00f6",
  'u': u"\u00fb", 'A': u"\u00c5", 'E': u"\u00c9", 'I': u"\u00ce",
  'O': u"\u00d6", 'U': u"\u00db", 'c': u"\u00e7", 'd': u"\u00f0",
  'n': u"\u00f1", 'p': u"\u00fe", 'y': u"\u00fd", 'C': u"\u00c7",
  'D': u"\u00d0", 'N': u"\u00d1", 'P': u"\u00de", 'Y': u"\u00dd",
  'f': u"\u0192", 's': u"\u0161", 'S': u"\u0160", 'z': u"\u017e",
  'Z': u"\u017d", 'g': u"\u011d", 'G': u"\u011c", 'h': u"\u0125",
  'H': u"\u0124", 'j': u"\u0135", 'J': u"\u0134", 'k': u"\u0137",
  'K': u"\u0136", 'l': u"\u013c", 'L': u"\u013b", 't': u"\u0163",
  'T': u"\u0162", 'w': u"\u0175", 'W': u"\u0174",
  '$': u"\u20ac", '?': u"\u00bf", 'R': u"\u00ae", r'!': u"\u00a1",
}

# a character set containing the keys in ACCENTED_STRINGS
# We should not accent characters in an escape sequence such as "\n".
# To be safe, we assume every character following a backslash is an escaped
# character. We also need to consider the case like "\\n", which means
# a blackslash and a character "n", we will accent the character "n".
TO_ACCENT = lazy_re.compile(
    r'[%s]|\\[a-z\\]' % ''.join(ACCENTED_STRINGS.keys()))

# Lex text so that we don't interfere with html tokens and entities.
# This lexing scheme will handle all well formed tags and entities, html or
# xhtml.  It will not handle comments, CDATA sections, or the unescaping tags:
# script, style, xmp or listing.  If any of those appear in messages,
# something is wrong.
TOKENS = [ lazy_re.compile(
    '^%s' % pattern,     # match at the beginning of input
    re.I | re.S          # html tokens are case-insensitive
    )
    for pattern in
    (
      # a run of non html special characters
      r'[^<&]+',
      # a tag
      (r'</?[a-z]\w*'                                  # beginning of tag
       r'(?:\s+\w+(?:\s*=\s*'                          # attribute start
       r'(?:[^\s"\'>]+|"[^\"]*"|\'[^\']*\'))?'         # attribute value
       r')*\s*/?>'),
      # an entity
      r'&(?:[a-z]\w+|#\d+|#x[\da-f]+);',
      # an html special character not part of a special sequence
      r'.'
    ) ]

# Matches a run of alphabetic (non-digit, non-underscore word) characters.
ALPHABETIC_RUN = lazy_re.compile(r'([^\W0-9_]+)')

# Unicode bidi controls: RIGHT-TO-LEFT OVERRIDE and POP DIRECTIONAL
# FORMATTING, used to wrap each alphabetic run.
RLO = u'\u202e'
PDF = u'\u202c'
def PseudoRTLString(text):
  '''Returns a fake bidirectional version of the source string. This code is
  based on accentString above, in turn copied from Frank Tang.
  '''
  out = []
  remaining = text
  while remaining:
    # Try each lexer pattern in order; the final '.' pattern guarantees
    # progress on any non-empty input.
    for lexer in TOKENS:
      match = lexer.search(remaining)
      if not match:
        continue
      token = match.group(0)
      remaining = remaining[len(token):]
      if token[0] not in ('<', '&'):
        # Plain text (not a tag or entity): wrap each alphabetic run in
        # RLO ... PDF direction overrides.
        token = ALPHABETIC_RUN.sub(lambda run: RLO + run.group() + PDF, token)
      out.append(token)
      break
  return ''.join(out)
def PseudoRTLMessage(message):
  '''Returns a pseudo-RTL (aka Fake-Bidi) translation of the provided message.

  Args:
    message: tclib.Message()

  Return:
    tclib.Translation()
  '''
  result = tclib.Translation()
  for chunk in message.GetContent():
    # Placeholders must survive untouched; only text chunks are transformed.
    if isinstance(chunk, tclib.Placeholder):
      result.AppendPlaceholder(chunk)
    else:
      result.AppendText(PseudoRTLString(chunk))
  return result

55
third_party/libwebrtc/tools/grit/grit/pseudo_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,55 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.pseudo'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from grit import pseudo
from grit import tclib
class PseudoUnittest(unittest.TestCase):
  '''Unit tests for the P-language pseudotranslation helpers.

  Uses assertEqual instead of the deprecated failUnless(a == b) idiom
  (the fail* aliases are removed in Python 3.12) so failures report the
  mismatching values.
  '''

  def testVowelMapping(self):
    self.assertEqual(pseudo.MapVowels('abebibobuby'),
                     u'\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd')
    self.assertEqual(pseudo.MapVowels('ABEBIBOBUBY'),
                     u'\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd')

  def testPseudoString(self):
    out = pseudo.PseudoString('hello')
    self.assertEqual(out, pseudo.MapVowels(u'hePelloPo', True))

  def testConsecutiveVowels(self):
    out = pseudo.PseudoString("beautiful weather, ain't it?")
    self.assertEqual(out, pseudo.MapVowels(
        u"beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1))

  def testCapitals(self):
    out = pseudo.PseudoString("HOWDIE DOODIE, DR. JONES")
    self.assertEqual(out, pseudo.MapVowels(
        u"HOPOWDIEPIE DOOPOODIEPIE, DR. JOPONEPES", 1))

  def testPseudoMessage(self):
    msg = tclib.Message(text='Hello USERNAME, how are you?',
                        placeholders=[
                            tclib.Placeholder('USERNAME', '%s', 'Joi')])
    trans = pseudo.PseudoMessage(msg)
    # TODO(joi) It would be nicer if 'you' -> 'youPou' instead of
    # 'you' -> 'youPyou' and if we handled the silent e in 'are'
    self.assertEqual(trans.GetPresentableContent(),
                     pseudo.MapVowels(
                         u'HePelloPo USERNAME, hoPow aParePe youPyou?', 1))


if __name__ == '__main__':
  unittest.main()

93
third_party/libwebrtc/tools/grit/grit/shortcuts.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,93 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Stuff to prevent conflicting shortcuts.
'''
from __future__ import print_function
from grit import lazy_re
class ShortcutGroup(object):
  '''Manages a list of cliques that belong together in a single shortcut
  group.  Knows how to detect conflicting shortcut keys.
  '''

  # Matches shortcut keys, e.g. &J
  SHORTCUT_RE = lazy_re.compile('([^&]|^)(&[A-Za-z])')

  def __init__(self, name):
    self.name = name
    # Map of language codes to shortcut keys used (which is a map of
    # shortcut keys to counts).
    self.keys_by_lang = {}
    # List of cliques in this group
    self.cliques = []

  def AddClique(self, c):
    '''Adds clique 'c' to the group and records its shortcut keys per
    language.  A clique with an already-registered message ID is ignored.'''
    if any(existing.GetId() == c.GetId() for existing in self.cliques):
      # This happens e.g. when we have e.g.
      # <if expr1><structure 1></if> <if expr2><structure 2></if>
      # where only one will really be included in the output.
      return

    self.cliques.append(c)
    for (lang, msg) in c.clique.items():
      # setdefault/get express the count-accumulation idiom directly.
      keymap = self.keys_by_lang.setdefault(lang, {})
      content = msg.GetRealContent()
      keys = [groups[1] for groups in self.SHORTCUT_RE.findall(content)]
      for key in keys:
        key = key.upper()
        keymap[key] = keymap.get(key, 0) + 1

  def GenerateWarnings(self, tc_project):
    '''Returns a list of warning lines, one header plus one line per
    language that uses the same shortcut key more than once.'''
    # For any language that has more than one occurrence of any shortcut,
    # make a list of the conflicting shortcuts.
    problem_langs = {}
    for (lang, keys) in self.keys_by_lang.items():
      duplicates = [key for (key, count) in keys.items() if count > 1]
      if duplicates:
        problem_langs[lang] = duplicates

    warnings = []
    if problem_langs:
      warnings.append("WARNING - duplicate keys exist in shortcut group %s" %
                      self.name)
      for (lang, keys) in problem_langs.items():
        warnings.append(" %6s duplicates: %s" % (lang, ', '.join(keys)))
    return warnings
def GenerateDuplicateShortcutsWarnings(uberclique, tc_project):
  '''Given an UberClique and a project name, will print out helpful warnings
  if there are conflicting shortcuts within shortcut groups in the provided
  UberClique.

  Args:
    uberclique: clique.UberClique()
    tc_project: 'MyProjectNameInTheTranslationConsole'

  Returns:
    ['warning line 1', 'warning line 2', ...]
  '''
  # Bucket every clique into its named ShortcutGroup(s).
  groups_by_name = {}
  for c in uberclique.AllCliques():
    for group_name in c.shortcut_groups:
      if group_name not in groups_by_name:
        groups_by_name[group_name] = ShortcutGroup(group_name)
      groups_by_name[group_name].AddClique(c)

  all_warnings = []
  for group in groups_by_name.values():
    all_warnings += group.GenerateWarnings(tc_project)
  return all_warnings

79
third_party/libwebrtc/tools/grit/grit/shortcuts_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,79 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.shortcuts
'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from six import StringIO
from grit import shortcuts
from grit import clique
from grit import tclib
from grit.gather import rc
class ShortcutsUnittest(unittest.TestCase):
  '''Unit tests for duplicate-shortcut detection.

  Uses modern assertTrue/assertEqual (the failUnless alias is deprecated
  and removed in Python 3.12).
  '''

  def setUp(self):
    self.uq = clique.UberClique()

  def testFunctionality(self):
    # Two messages sharing the &t shortcut must produce a warning.
    c = self.uq.MakeClique(tclib.Message(text="Hello &there"))
    c.AddToShortcutGroup('group_name')
    c = self.uq.MakeClique(tclib.Message(text="Howdie &there partner"))
    c.AddToShortcutGroup('group_name')

    warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
    self.assertTrue(warnings)

  def testAmpersandEscaping(self):
    # '&&' is an escaped literal ampersand, not a shortcut marker.
    c = self.uq.MakeClique(tclib.Message(text="Hello &there"))
    c.AddToShortcutGroup('group_name')
    c = self.uq.MakeClique(tclib.Message(text="S&&T are the &letters S and T"))
    c.AddToShortcutGroup('group_name')

    warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
    self.assertEqual(len(warnings), 0)

  def testDialog(self):
    dlg = rc.Dialog(StringIO('''\
IDD_SIDEBAR_RSS_PANEL_PROPPAGE DIALOGEX 0, 0, 239, 221
STYLE DS_SETFONT | DS_FIXEDSYS | WS_CHILD
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
    PUSHBUTTON      "Add &URL",IDC_SIDEBAR_RSS_ADD_URL,182,53,57,14
    EDITTEXT        IDC_SIDEBAR_RSS_NEW_URL,0,53,178,15,ES_AUTOHSCROLL
    PUSHBUTTON      "&Remove",IDC_SIDEBAR_RSS_REMOVE,183,200,56,14
    PUSHBUTTON      "&Edit",IDC_SIDEBAR_RSS_EDIT,123,200,56,14
    CONTROL         "&Automatically add commonly viewed clips",
                    IDC_SIDEBAR_RSS_AUTO_ADD,"Button",BS_AUTOCHECKBOX |
                    BS_MULTILINE | WS_TABSTOP,0,200,120,17
    PUSHBUTTON      "",IDC_SIDEBAR_RSS_HIDDEN,179,208,6,6,NOT WS_VISIBLE
    LTEXT           "You can display clips from blogs, news sites, and other online sources.",
                    IDC_STATIC,0,0,239,10
    LISTBOX         IDC_SIDEBAR_DISPLAYED_FEED_LIST,0,69,239,127,LBS_SORT |
                    LBS_OWNERDRAWFIXED | LBS_HASSTRINGS |
                    LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_HSCROLL |
                    WS_TABSTOP
    LTEXT           "Add a clip from a recently viewed website by clicking Add Recent Clips.",
                    IDC_STATIC,0,13,141,19
    LTEXT           "Or, if you know a site supports RSS or Atom, you can enter the RSS or Atom URL below and add it to your list of Web Clips.",
                    IDC_STATIC,0,33,239,18
    PUSHBUTTON      "Add Recent &Clips (10)...",
                    IDC_SIDEBAR_RSS_ADD_RECENT_CLIPS,146,14,93,14
END'''), 'IDD_SIDEBAR_RSS_PANEL_PROPPAGE')
    dlg.SetUberClique(self.uq)
    dlg.Parse()

    warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
    self.assertEqual(len(warnings), 0)

246
third_party/libwebrtc/tools/grit/grit/tclib.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,246 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Adaptation of the extern.tclib classes for our needs.
'''
from __future__ import print_function
import functools
import re
import six
from grit import exception
from grit import lazy_re
import grit.extern.tclib
# Matches whitespace sequences which can be folded into a single whitespace
# character.  This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = re.compile(r'\s+')

# Caches compiled regexp used to split tags in BaseMessage.__init__()
# NOTE(review): keyed by the full alternation string; grows for the
# lifetime of the process — presumably bounded by the distinct
# placeholder sets seen in a build, verify if memory matters.
_RE_CACHE = {}
def Identity(i):
  '''Returns its argument unchanged (default escaping function for
  BaseMessage.GetRealContent).'''
  return i
class BaseMessage(object):
  '''Base class with methods shared by Message and Translation.
  '''

  def __init__(self, text='', placeholders=None, description='', meaning=''):
    '''Initializes the message.

    Args:
      text: Message text; placeholder presentations embedded in it are split
          out into Placeholder parts.
      placeholders: Optional list of Placeholder objects.  Defaults to None
          (treated as an empty list) rather than a shared mutable [] default.
      description: Description for translators; whitespace runs are folded.
      meaning: Disambiguates otherwise-identical messages for ID generation.
    '''
    self.parts = []
    self.placeholders = []
    self.meaning = meaning
    self.dirty = True  # True if self.id is (or might be) wrong
    self.id = 0
    self.SetDescription(description)

    if text != '':
      if not placeholders:
        self.AppendText(text)
      else:
        tag_map = {}
        for placeholder in placeholders:
          tag_map[placeholder.GetPresentation()] = [placeholder, 0]
        # This creates a regexp like '(TAG1|TAG2|TAG3)'.
        # The tags have to be sorted in order of decreasing length, so that
        # longer tags are substituted before shorter tags that happen to be
        # substrings of the longer tag.
        # E.g. "EXAMPLE_FOO_NAME" must be matched before "EXAMPLE_FOO",
        # otherwise "EXAMPLE_FOO" splits "EXAMPLE_FOO_NAME" too.
        tags = sorted(tag_map.keys(),
                      key=functools.cmp_to_key(
                          lambda x, y: len(x) - len(y) or ((x > y) - (x < y))),
                      reverse=True)
        tag_re = '(' + '|'.join(tags) + ')'
        # This caching improves the time to build
        # chrome/app:generated_resources from 21.562s to 17.672s on Linux.
        compiled_re = _RE_CACHE.get(tag_re, None)
        if compiled_re is None:
          compiled_re = re.compile(tag_re)
          _RE_CACHE[tag_re] = compiled_re
        chunked_text = compiled_re.split(text)

        for chunk in chunked_text:
          if chunk:  # ignore empty chunk
            if chunk in tag_map:
              self.AppendPlaceholder(tag_map[chunk][0])
              tag_map[chunk][1] += 1  # increase placeholder use count
            else:
              self.AppendText(chunk)
        # Every supplied placeholder must occur in the text at least once.
        for key in tag_map:
          assert tag_map[key][1] != 0

  def GetRealContent(self, escaping_function=Identity):
    '''Returns the original content, i.e. what your application and users
    will see.

    Specify a function to escape each translateable bit, if you like.
    '''
    bits = []
    for item in self.parts:
      if isinstance(item, six.string_types):
        bits.append(escaping_function(item))
      else:
        bits.append(item.GetOriginal())
    return ''.join(bits)

  def GetPresentableContent(self):
    '''Returns the message text with placeholders shown by their
    presentation names (e.g. USERNAME) instead of their originals.'''
    presentable_content = []
    for part in self.parts:
      if isinstance(part, Placeholder):
        presentable_content.append(part.GetPresentation())
      else:
        presentable_content.append(part)
    return ''.join(presentable_content)

  def AppendPlaceholder(self, placeholder):
    '''Appends a Placeholder part.  A placeholder whose presentation was
    already added is appended to parts but not duplicated in
    self.placeholders; its original text must agree with the first one.'''
    assert isinstance(placeholder, Placeholder)
    dup = False
    for other in self.GetPlaceholders():
      if other.presentation == placeholder.presentation:
        assert other.original == placeholder.original
        dup = True

    if not dup:
      self.placeholders.append(placeholder)
    self.parts.append(placeholder)
    self.dirty = True

  def AppendText(self, text):
    '''Appends a non-empty plain-text part.'''
    assert isinstance(text, six.string_types)
    assert text != ''

    self.parts.append(text)
    self.dirty = True

  def GetContent(self):
    '''Returns the parts of the message.  You may modify parts if you wish.
    Note that you must not call GetId() on this object until you have
    finished modifying the contents.
    '''
    self.dirty = True  # user might modify content
    return self.parts

  def GetDescription(self):
    return self.description

  def SetDescription(self, description):
    # Fold whitespace runs so descriptions compare and hash consistently.
    self.description = _FOLD_WHITESPACE.sub(' ', description)

  def GetMeaning(self):
    return self.meaning

  def GetId(self):
    '''Returns the message ID, recomputing it first if the content may have
    changed since it was last computed.'''
    if self.dirty:
      self.id = self.GenerateId()
      self.dirty = False
    return self.id

  def GenerateId(self):
    return grit.extern.tclib.GenerateMessageId(self.GetPresentableContent(),
                                               self.meaning)

  def GetPlaceholders(self):
    return self.placeholders

  def FillTclibBaseMessage(self, msg):
    '''Copies this message's description, parts and placeholders into 'msg'
    (a grit.extern.tclib message), encoding everything as UTF-8.'''
    msg.SetDescription(self.description.encode('utf-8'))

    for part in self.parts:
      if isinstance(part, Placeholder):
        ph = grit.extern.tclib.Placeholder(
            part.presentation.encode('utf-8'),
            part.original.encode('utf-8'),
            part.example.encode('utf-8'))
        msg.AppendPlaceholder(ph)
      else:
        msg.AppendText(part.encode('utf-8'))
class Message(BaseMessage):
  '''A message.'''

  def __init__(self, text='', placeholders=None, description='', meaning='',
               assigned_id=None):
    # placeholders defaults to None (not a shared mutable []); BaseMessage
    # treats None and [] identically.
    super(Message, self).__init__(text, placeholders, description, meaning)
    self.assigned_id = assigned_id

  def ToTclibMessage(self):
    '''Returns this message as a grit.extern.tclib Message.'''
    msg = grit.extern.tclib.Message('utf-8', meaning=self.meaning)
    self.FillTclibBaseMessage(msg)
    return msg

  def GetId(self):
    '''Use the assigned id if we have one.'''
    if self.assigned_id:
      return self.assigned_id

    return super(Message, self).GetId()

  def HasAssignedId(self):
    '''Returns True if this message has an assigned id.'''
    return bool(self.assigned_id)
class Translation(BaseMessage):
  '''A translation.'''

  def __init__(self, text='', id='', placeholders=None, description='',
               meaning=''):
    # placeholders defaults to None (not a shared mutable []); BaseMessage
    # treats None and [] identically.
    super(Translation, self).__init__(text, placeholders, description, meaning)
    self.id = id

  def GetId(self):
    # Bug fix: the original asserted on the *builtin* 'id' function
    # ('assert id != ""'), which is always truthy, so an unset ID was
    # never caught.  Check the instance attribute instead.
    assert self.id != '', "ID has not been set."
    return self.id

  def SetId(self, id):
    self.id = id

  def ToTclibMessage(self):
    '''Returns this translation as a grit.extern.tclib Message.'''
    msg = grit.extern.tclib.Message(
        'utf-8', id=self.id, meaning=self.meaning)
    self.FillTclibBaseMessage(msg)
    return msg
class Placeholder(grit.extern.tclib.Placeholder):
  '''Modifies constructor to accept a Unicode string
  '''

  # Must match placeholder presentation names
  _NAME_RE = lazy_re.compile('^[A-Za-z0-9_]+$')

  def __init__(self, presentation, original, example):
    '''Creates a new placeholder.

    Args:
      presentation: 'USERNAME'
      original: '%s'
      example: 'Joi'

    Raises:
      exception.InvalidPlaceholderName: if 'presentation' contains anything
          other than letters, digits and underscores.
    '''
    # All three pieces are required to be non-empty.
    for value in (presentation, original, example):
      assert value != ''
    if not self._NAME_RE.match(presentation):
      raise exception.InvalidPlaceholderName(presentation)
    self.presentation = presentation
    self.original = original
    self.example = example

  def GetPresentation(self):
    return self.presentation

  def GetOriginal(self):
    return self.original

  def GetExample(self):
    return self.example

180
third_party/libwebrtc/tools/grit/grit/tclib_unittest.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,180 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.tclib'''
from __future__ import print_function
import sys
import os.path
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import six
from grit import tclib
from grit import exception
import grit.extern.tclib
class TclibUnittest(unittest.TestCase):
def testInit(self):
msg = tclib.Message(text=u'Hello Earthlings',
description='Greetings\n\t message')
self.failUnlessEqual(msg.GetPresentableContent(), 'Hello Earthlings')
self.failUnless(isinstance(msg.GetPresentableContent(), six.string_types))
self.failUnlessEqual(msg.GetDescription(), 'Greetings message')
def testGetAttr(self):
msg = tclib.Message()
msg.AppendText(u'Hello') # Tests __getattr__
self.failUnless(msg.GetPresentableContent() == 'Hello')
self.failUnless(isinstance(msg.GetPresentableContent(), six.string_types))
def testAll(self):
text = u'Howdie USERNAME'
phs = [tclib.Placeholder(u'USERNAME', u'%s', 'Joi')]
msg = tclib.Message(text=text, placeholders=phs)
self.failUnless(msg.GetPresentableContent() == 'Howdie USERNAME')
trans = tclib.Translation(text=text, placeholders=phs)
self.failUnless(trans.GetPresentableContent() == 'Howdie USERNAME')
self.failUnless(isinstance(trans.GetPresentableContent(), six.string_types))
def testUnicodeReturn(self):
text = u'\u00fe'
msg = tclib.Message(text=text)
self.failUnless(msg.GetPresentableContent() == text)
from_list = msg.GetContent()[0]
self.failUnless(from_list == text)
def testRegressionTranslationInherited(self):
'''Regression tests a bug that was caused by grit.tclib.Translation
inheriting from the translation console's Translation object
instead of only owning an instance of it.
'''
msg = tclib.Message(text=u"BLA1\r\nFrom: BLA2 \u00fe BLA3",
placeholders=[
tclib.Placeholder('BLA1', '%s', '%s'),
tclib.Placeholder('BLA2', '%s', '%s'),
tclib.Placeholder('BLA3', '%s', '%s')])
transl = tclib.Translation(text=msg.GetPresentableContent(),
placeholders=msg.GetPlaceholders())
content = transl.GetContent()
self.failUnless(isinstance(content[3], six.string_types))
def testFingerprint(self):
# This has Windows line endings. That is on purpose.
id = grit.extern.tclib.GenerateMessageId(
'Google Desktop for Enterprise\r\n'
'All Rights Reserved\r\n'
'\r\n'
'---------\r\n'
'Contents\r\n'
'---------\r\n'
'This distribution contains the following files:\r\n'
'\r\n'
'GoogleDesktopSetup.msi - Installation and setup program\r\n'
'GoogleDesktop.adm - Group Policy administrative template file\r\n'
'AdminGuide.pdf - Google Desktop for Enterprise administrative guide\r\n'
'\r\n'
'\r\n'
'--------------\r\n'
'Documentation\r\n'
'--------------\r\n'
'Full documentation and installation instructions are in the \r\n'
'administrative guide, and also online at \r\n'
'http://desktop.google.com/enterprise/adminguide.html.\r\n'
'\r\n'
'\r\n'
'------------------------\r\n'
'IBM Lotus Notes Plug-In\r\n'
'------------------------\r\n'
'The Lotus Notes plug-in is included in the release of Google \r\n'
'Desktop for Enterprise. The IBM Lotus Notes Plug-in for Google \r\n'
'Desktop indexes mail, calendar, task, contact and journal \r\n'
'documents from Notes. Discussion documents including those from \r\n'
'the discussion and team room templates can also be indexed by \r\n'
'selecting an option from the preferences. Once indexed, this data\r\n'
'will be returned in Google Desktop searches. The corresponding\r\n'
'document can be opened in Lotus Notes from the Google Desktop \r\n'
'results page.\r\n'
'\r\n'
'Install: The plug-in will install automatically during the Google \r\n'
'Desktop setup process if Lotus Notes is already installed. Lotus \r\n'
'Notes must not be running in order for the install to occur. \r\n'
'\r\n'
'Preferences: Preferences and selection of databases to index are\r\n'
'set in the \'Google Desktop for Notes\' dialog reached through the \r\n'
'\'Actions\' menu.\r\n'
'\r\n'
'Reindexing: Selecting \'Reindex all databases\' will index all the \r\n'
'documents in each database again.\r\n'
'\r\n'
'\r\n'
'Notes Plug-in Known Issues\r\n'
'---------------------------\r\n'
'\r\n'
'If the \'Google Desktop for Notes\' item is not available from the \r\n'
'Lotus Notes Actions menu, then installation was not successful. \r\n'
'Installation consists of writing one file, notesgdsplugin.dll, to \r\n'
'the Notes application directory and a setting to the notes.ini \r\n'
'configuration file. The most likely cause of an unsuccessful \r\n'
'installation is that the installer was not able to locate the \r\n'
'notes.ini file. Installation will complete if the user closes Notes\r\n'
'and manually adds the following setting to this file on a new line:\r\n'
'AddinMenus=notegdsplugin.dll\r\n'
'\r\n'
'If the notesgdsplugin.dll file is not in the application directory\r\n'
r'(e.g., C:\Program Files\Lotus\Notes) after Google Desktop \r\n'
'installation, it is likely that Notes was not installed correctly. \r\n'
'\r\n'
'Only local databases can be indexed. If they can be determined, \r\n'
'the user\'s local mail file and address book will be included in the\r\n'
'list automatically. Mail archives and other databases must be \r\n'
'added with the \'Add\' button.\r\n'
'\r\n'
'Some users may experience performance issues during the initial \r\n'
'indexing of a database. The \'Perform the initial index of a \r\n'
'database only when I\'m idle\' option will limit the indexing process\r\n'
'to times when the user is not using the machine. If this does not \r\n'
'alleviate the problem or the user would like to continually index \r\n'
'but just do so more slowly or quickly, the GoogleWaitTime notes.ini\r\n'
'value can be set. Increasing the GoogleWaitTime value will slow \r\n'
'down the indexing process, and lowering the value will speed it up.\r\n'
'A value of zero causes the fastest possible indexing. Removing the\r\n'
'ini parameter altogether returns it to the default (20).\r\n'
'\r\n'
'Crashes have been known to occur with certain types of history \r\n'
'bookmarks. If the Notes client seems to crash randomly, try \r\n'
'disabling the \'Index note history\' option. If it crashes before,\r\n'
'you can get to the preferences, add the following line to your \r\n'
'notes.ini file:\r\n'
'GDSNoIndexHistory=1\r\n')
self.assertEqual(id, '7660964495923572726')
def testPlaceholderNameChecking(self):
  """Placeholder presentations containing spaces must be rejected."""
  # assertRaises covers both failure modes of the old manual try/except
  # (no exception raised, or the wrong exception raised) and reports a
  # clearer message, without changing what the test verifies.
  self.assertRaises(exception.InvalidPlaceholderName,
                    tclib.Placeholder, 'BINGO BONGO', 'bla', 'bla')
def testTagsWithCommonSubstring(self):
  """tclib.Message() must handle placeholders whose presentations are
  prefixes (substrings) of one another."""
  word = 'ABCDEFGHIJ'
  text = ' '.join([word[:i] for i in range(1, 11)])
  phs = [tclib.Placeholder(word[:i], str(i), str(i)) for i in range(1, 11)]
  try:
    msg = tclib.Message(text=text, placeholders=phs)
  except Exception:  # any failure to construct is exactly the bug under test
    self.fail('tclib.Message() should handle placeholders that are '
              'substrings of each other')
  # The assertion is deliberately OUTSIDE the try block: the original bare
  # `except:` also caught the AssertionError raised by the assertion itself,
  # silently converting a real comparison failure into the generic message.
  self.assertEqual(msg.GetRealContent(), '1 2 3 4 5 6 7 8 9 10')
# Allow this module to be run directly as a standalone test script.
if __name__ == '__main__':
  unittest.main()

34
third_party/libwebrtc/tools/grit/grit/test_suite_all.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
#!/usr/bin/env python3
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test suite that collects all test cases for GRIT.'''
from __future__ import print_function
import os
import sys

# Locate the vendored copy of the `typ` test runner relative to this file:
# <src>/third_party/catapult/third_party/typ. SRC_DIR is three directory
# levels above this file (grit/ -> tools/ -> <src>).
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(CUR_DIR)))
TYP_DIR = os.path.join(
    SRC_DIR, 'third_party', 'catapult', 'third_party', 'typ')

# Make the vendored typ importable without requiring an installed package.
if TYP_DIR not in sys.path:
  sys.path.insert(0, TYP_DIR)

import typ  # pylint: disable=import-error,unused-import
def main(args):
  """Run the GRIT unit-test suite under typ.

  Tests are discovered one directory above this file; the two test
  patterns excluded below are skipped. Returns typ's exit status.
  """
  tests_root = os.path.join(CUR_DIR, '..')
  excluded = [
      'grit.format.gen_predetermined_ids_unittest.*',
      'grit.pseudo_unittest.*',
  ]
  return typ.main(top_level_dirs=[tests_root], skip=excluded)
# Propagate typ's exit status as the process exit code so callers
# (e.g. CI) can detect test failures.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))

945
third_party/libwebrtc/tools/grit/grit/testdata/GoogleDesktop.adm поставляемый Normal file
Просмотреть файл

@ -0,0 +1,945 @@
CLASS MACHINE
CATEGORY !!Cat_Google
CATEGORY !!Cat_GoogleDesktopSearch
KEYNAME "Software\Policies\Google\Google Desktop"
CATEGORY !!Cat_Preferences
KEYNAME "Software\Policies\Google\Google Desktop\Preferences"
CATEGORY !!Cat_IndexAndCaptureControl
POLICY !!Blacklist_Email
EXPLAIN !!Explain_Blacklist_Email
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
VALUENAME "1"
END POLICY
POLICY !!Blacklist_Gmail
EXPLAIN !!Explain_Blacklist_Gmail
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-pop"
VALUENAME "gmail"
END POLICY
POLICY !!Blacklist_WebHistory
EXPLAIN !!Explain_Blacklist_WebHistory
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
VALUENAME "2"
END POLICY
POLICY !!Blacklist_Chat
EXPLAIN !!Explain_Blacklist_Chat
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "3" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Text
EXPLAIN !!Explain_Blacklist_Text
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "4" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Media
EXPLAIN !!Explain_Blacklist_Media
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "5" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Contact
EXPLAIN !!Explain_Blacklist_Contact
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "9" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Calendar
EXPLAIN !!Explain_Blacklist_Calendar
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "10" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Task
EXPLAIN !!Explain_Blacklist_Task
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "11" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Note
EXPLAIN !!Explain_Blacklist_Note
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "12" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Journal
EXPLAIN !!Explain_Blacklist_Journal
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "13" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Word
EXPLAIN !!Explain_Blacklist_Word
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "DOC"
END POLICY
POLICY !!Blacklist_Excel
EXPLAIN !!Explain_Blacklist_Excel
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "XLS"
END POLICY
POLICY !!Blacklist_Powerpoint
EXPLAIN !!Explain_Blacklist_Powerpoint
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "PPT"
END POLICY
POLICY !!Blacklist_PDF
EXPLAIN !!Explain_Blacklist_PDF
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "PDF"
END POLICY
POLICY !!Blacklist_ZIP
EXPLAIN !!Explain_Blacklist_ZIP
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "ZIP"
END POLICY
POLICY !!Blacklist_HTTPS
EXPLAIN !!Explain_Blacklist_HTTPS
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-3"
VALUENAME "HTTPS"
END POLICY
POLICY !!Blacklist_PasswordProtectedOffice
EXPLAIN !!Explain_Blacklist_PasswordProtectedOffice
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-13"
VALUENAME "SECUREOFFICE"
END POLICY
POLICY !!Blacklist_URI_Contains
EXPLAIN !!Explain_Blacklist_URI_Contains
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-6"
PART !!Blacklist_URI_Contains LISTBOX
END PART
END POLICY
POLICY !!Blacklist_Extensions
EXPLAIN !!Explain_Blacklist_Extensions
PART !!Blacklist_Extensions EDITTEXT
VALUENAME "file_extensions_to_skip"
END PART
END POLICY
POLICY !!Pol_Disallow_UserSearchLocations
EXPLAIN !!Explain_Disallow_UserSearchLocations
VALUENAME user_search_locations
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Search_Location_Whitelist
EXPLAIN !!Explain_Search_Location_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\policy_search_location_whitelist"
PART !!Search_Locations_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Email_Retention
EXPLAIN !!Explain_Email_Retention
PART !!Email_Retention_Edit NUMERIC
VALUENAME "email_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!Webpage_Retention
EXPLAIN !!Explain_Webpage_Retention
PART !!Webpage_Retention_Edit NUMERIC
VALUENAME "webpage_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!File_Retention
EXPLAIN !!Explain_File_Retention
PART !!File_Retention_Edit NUMERIC
VALUENAME "file_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!IM_Retention
EXPLAIN !!Explain_IM_Retention
PART !!IM_Retention_Edit NUMERIC
VALUENAME "im_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!Pol_Remove_Deleted_Items
EXPLAIN !!Explain_Remove_Deleted_Items
VALUENAME remove_deleted_items
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Allow_Simultaneous_Indexing
EXPLAIN !!Explain_Allow_Simultaneous_Indexing
VALUENAME simultaneous_indexing
VALUEON NUMERIC 1
END POLICY
END CATEGORY
POLICY !!Pol_TurnOffAdvancedFeatures
EXPLAIN !!Explain_TurnOffAdvancedFeatures
VALUENAME error_report_on
VALUEON NUMERIC 0
END POLICY
POLICY !!Pol_TurnOffImproveGd
EXPLAIN !!Explain_TurnOffImproveGd
VALUENAME improve_gd
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_NoPersonalizationInfo
EXPLAIN !!Explain_NoPersonalizationInfo
VALUENAME send_personalization_info
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_OneBoxMode
EXPLAIN !!Explain_OneBoxMode
VALUENAME onebox_mode
VALUEON NUMERIC 0
END POLICY
POLICY !!Pol_EncryptIndex
EXPLAIN !!Explain_EncryptIndex
VALUENAME encrypt_index
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Hyper
EXPLAIN !!Explain_Hyper
VALUENAME hyper_off
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Display_Mode
EXPLAIN !!Explain_Display_Mode
PART !!Pol_Display_Mode DROPDOWNLIST
VALUENAME display_mode
ITEMLIST
NAME !!Sidebar VALUE NUMERIC 1
NAME !!Deskbar VALUE NUMERIC 8
NAME !!FloatingDeskbar VALUE NUMERIC 4
NAME !!None VALUE NUMERIC 0
END ITEMLIST
END PART
END POLICY
END CATEGORY ; Preferences
CATEGORY !!Cat_Enterprise
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise"
POLICY !!Pol_Autoupdate
EXPLAIN !!Explain_Autoupdate
VALUENAME autoupdate_host
VALUEON ""
END POLICY
POLICY !!Pol_AutoupdateAsSystem
EXPLAIN !!Explain_AutoupdateAsSystem
VALUENAME autoupdate_impersonate_user
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_EnterpriseTab
EXPLAIN !!Explain_EnterpriseTab
PART !!EnterpriseTabText EDITTEXT
VALUENAME enterprise_tab_text
END PART
PART !!EnterpriseTabHomepage EDITTEXT
VALUENAME enterprise_tab_homepage
END PART
PART !!EnterpriseTabHomepageQuery CHECKBOX
VALUENAME enterprise_tab_homepage_query
END PART
PART !!EnterpriseTabResults EDITTEXT
VALUENAME enterprise_tab_results
END PART
PART !!EnterpriseTabResultsQuery CHECKBOX
VALUENAME enterprise_tab_results_query
END PART
END POLICY
POLICY !!Pol_GSAHosts
EXPLAIN !!Explain_GSAHosts
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\GSAHosts"
PART !!Pol_GSAHosts LISTBOX
END PART
END POLICY
POLICY !!Pol_PolicyUnawareClientProhibitedFlag
EXPLAIN !!Explain_PolicyUnawareClientProhibitedFlag
KEYNAME "Software\Policies\Google\Google Desktop"
VALUENAME PolicyUnawareClientProhibitedFlag
END POLICY
POLICY !!Pol_MinimumAllowedVersion
EXPLAIN !!Explain_MinimumAllowedVersion
PART !!Pol_MinimumAllowedVersion EDITTEXT
VALUENAME minimum_allowed_version
END PART
END POLICY
POLICY !!Pol_MaximumAllowedVersion
EXPLAIN !!Explain_MaximumAllowedVersion
PART !!Pol_MaximumAllowedVersion EDITTEXT
VALUENAME maximum_allowed_version
END PART
END POLICY
POLICY !!Pol_Disallow_Gadgets
EXPLAIN !!Explain_Disallow_Gadgets
VALUENAME disallow_gadgets
VALUEON NUMERIC 1
PART !!Disallow_Only_Non_Builtin_Gadgets CHECKBOX DEFCHECKED
VALUENAME disallow_only_non_builtin_gadgets
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END PART
END POLICY
POLICY !!Pol_Gadget_Whitelist
EXPLAIN !!Explain_Gadget_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\gadget_whitelist"
PART !!Pol_Gadget_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Pol_Gadget_Install_Confirmation_Whitelist
EXPLAIN !!Explain_Gadget_Install_Confirmation_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\install_confirmation_whitelist"
PART !!Pol_Gadget_Install_Confirmation_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Pol_Alternate_User_Data_Dir
EXPLAIN !!Explain_Alternate_User_Data_Dir
PART !!Pol_Alternate_User_Data_Dir EDITTEXT
VALUENAME alternate_user_data_dir
END PART
END POLICY
POLICY !!Pol_MaxAllowedOutlookConnections
EXPLAIN !!Explain_MaxAllowedOutlookConnections
PART !!Pol_MaxAllowedOutlookConnections NUMERIC
VALUENAME max_allowed_outlook_connections
MIN 1 MAX 65535 DEFAULT 400 SPIN 1
END PART
END POLICY
POLICY !!Pol_DisallowSsdService
EXPLAIN !!Explain_DisallowSsdService
VALUENAME disallow_ssd_service
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_DisallowSsdOutbound
EXPLAIN !!Explain_DisallowSsdOutbound
VALUENAME disallow_ssd_outbound
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Disallow_Store_Gadget_Service
EXPLAIN !!Explain_Disallow_Store_Gadget_Service
VALUENAME disallow_store_gadget_service
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_MaxExchangeIndexingRate
EXPLAIN !!Explain_MaxExchangeIndexingRate
PART !!Pol_MaxExchangeIndexingRate NUMERIC
VALUENAME max_exchange_indexing_rate
MIN 1 MAX 1000 DEFAULT 60 SPIN 1
END PART
END POLICY
POLICY !!Pol_EnableSafeweb
EXPLAIN !!Explain_Safeweb
VALUENAME safe_browsing
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY ; Enterprise
END CATEGORY ; GoogleDesktopSearch
END CATEGORY ; Google
CLASS USER
CATEGORY !!Cat_Google
CATEGORY !!Cat_GoogleDesktopSearch
KEYNAME "Software\Policies\Google\Google Desktop"
CATEGORY !!Cat_Preferences
KEYNAME "Software\Policies\Google\Google Desktop\Preferences"
CATEGORY !!Cat_IndexAndCaptureControl
POLICY !!Blacklist_Email
EXPLAIN !!Explain_Blacklist_Email
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
VALUENAME "1"
END POLICY
POLICY !!Blacklist_Gmail
EXPLAIN !!Explain_Blacklist_Gmail
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-pop"
VALUENAME "gmail"
END POLICY
POLICY !!Blacklist_WebHistory
EXPLAIN !!Explain_Blacklist_WebHistory
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
VALUENAME "2"
END POLICY
POLICY !!Blacklist_Chat
EXPLAIN !!Explain_Blacklist_Chat
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "3" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Text
EXPLAIN !!Explain_Blacklist_Text
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "4" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Media
EXPLAIN !!Explain_Blacklist_Media
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "5" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Contact
EXPLAIN !!Explain_Blacklist_Contact
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "9" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Calendar
EXPLAIN !!Explain_Blacklist_Calendar
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "10" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Task
EXPLAIN !!Explain_Blacklist_Task
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "11" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Note
EXPLAIN !!Explain_Blacklist_Note
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "12" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Journal
EXPLAIN !!Explain_Blacklist_Journal
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-1"
ACTIONLISTON
VALUENAME "13" VALUE NUMERIC 1
END ACTIONLISTON
END POLICY
POLICY !!Blacklist_Word
EXPLAIN !!Explain_Blacklist_Word
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "DOC"
END POLICY
POLICY !!Blacklist_Excel
EXPLAIN !!Explain_Blacklist_Excel
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "XLS"
END POLICY
POLICY !!Blacklist_Powerpoint
EXPLAIN !!Explain_Blacklist_Powerpoint
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "PPT"
END POLICY
POLICY !!Blacklist_PDF
EXPLAIN !!Explain_Blacklist_PDF
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "PDF"
END POLICY
POLICY !!Blacklist_ZIP
EXPLAIN !!Explain_Blacklist_ZIP
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-2"
VALUENAME "ZIP"
END POLICY
POLICY !!Blacklist_HTTPS
EXPLAIN !!Explain_Blacklist_HTTPS
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-3"
VALUENAME "HTTPS"
END POLICY
POLICY !!Blacklist_PasswordProtectedOffice
EXPLAIN !!Explain_Blacklist_PasswordProtectedOffice
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-13"
VALUENAME "SECUREOFFICE"
END POLICY
POLICY !!Blacklist_URI_Contains
EXPLAIN !!Explain_Blacklist_URI_Contains
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\blacklist-6"
PART !!Blacklist_URI_Contains LISTBOX
END PART
END POLICY
POLICY !!Blacklist_Extensions
EXPLAIN !!Explain_Blacklist_Extensions
PART !!Blacklist_Extensions EDITTEXT
VALUENAME "file_extensions_to_skip"
END PART
END POLICY
POLICY !!Pol_Disallow_UserSearchLocations
EXPLAIN !!Explain_Disallow_UserSearchLocations
VALUENAME user_search_locations
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Search_Location_Whitelist
EXPLAIN !!Explain_Search_Location_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Preferences\policy_search_location_whitelist"
PART !!Search_Locations_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Email_Retention
EXPLAIN !!Explain_Email_Retention
PART !!Email_Retention_Edit NUMERIC
VALUENAME "email_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!Webpage_Retention
EXPLAIN !!Explain_Webpage_Retention
PART !!Webpage_Retention_Edit NUMERIC
VALUENAME "webpage_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!File_Retention
EXPLAIN !!Explain_File_Retention
PART !!File_Retention_Edit NUMERIC
VALUENAME "file_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!IM_Retention
EXPLAIN !!Explain_IM_Retention
PART !!IM_Retention_Edit NUMERIC
VALUENAME "im_days_to_retain"
MIN 1 MAX 65535 DEFAULT 30 SPIN 1
END PART
END POLICY
POLICY !!Pol_Remove_Deleted_Items
EXPLAIN !!Explain_Remove_Deleted_Items
VALUENAME remove_deleted_items
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Allow_Simultaneous_Indexing
EXPLAIN !!Explain_Allow_Simultaneous_Indexing
VALUENAME simultaneous_indexing
VALUEON NUMERIC 1
END POLICY
END CATEGORY
POLICY !!Pol_TurnOffAdvancedFeatures
EXPLAIN !!Explain_TurnOffAdvancedFeatures
VALUENAME error_report_on
VALUEON NUMERIC 0
END POLICY
POLICY !!Pol_TurnOffImproveGd
EXPLAIN !!Explain_TurnOffImproveGd
VALUENAME improve_gd
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_NoPersonalizationInfo
EXPLAIN !!Explain_NoPersonalizationInfo
VALUENAME send_personalization_info
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_OneBoxMode
EXPLAIN !!Explain_OneBoxMode
VALUENAME onebox_mode
VALUEON NUMERIC 0
END POLICY
POLICY !!Pol_EncryptIndex
EXPLAIN !!Explain_EncryptIndex
VALUENAME encrypt_index
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Hyper
EXPLAIN !!Explain_Hyper
VALUENAME hyper_off
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Display_Mode
EXPLAIN !!Explain_Display_Mode
PART !!Pol_Display_Mode DROPDOWNLIST
VALUENAME display_mode
ITEMLIST
NAME !!Sidebar VALUE NUMERIC 1
NAME !!Deskbar VALUE NUMERIC 8
NAME !!FloatingDeskbar VALUE NUMERIC 4
NAME !!None VALUE NUMERIC 0
END ITEMLIST
END PART
END POLICY
END CATEGORY ; Preferences
CATEGORY !!Cat_Enterprise
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise"
POLICY !!Pol_Autoupdate
EXPLAIN !!Explain_Autoupdate
VALUENAME autoupdate_host
VALUEON ""
END POLICY
POLICY !!Pol_AutoupdateAsSystem
EXPLAIN !!Explain_AutoupdateAsSystem
VALUENAME autoupdate_impersonate_user
VALUEON NUMERIC 0
VALUEOFF NUMERIC 1
END POLICY
POLICY !!Pol_EnterpriseTab
EXPLAIN !!Explain_EnterpriseTab
PART !!EnterpriseTabText EDITTEXT
VALUENAME enterprise_tab_text
END PART
PART !!EnterpriseTabHomepage EDITTEXT
VALUENAME enterprise_tab_homepage
END PART
PART !!EnterpriseTabHomepageQuery CHECKBOX
VALUENAME enterprise_tab_homepage_query
END PART
PART !!EnterpriseTabResults EDITTEXT
VALUENAME enterprise_tab_results
END PART
PART !!EnterpriseTabResultsQuery CHECKBOX
VALUENAME enterprise_tab_results_query
END PART
END POLICY
POLICY !!Pol_GSAHosts
EXPLAIN !!Explain_GSAHosts
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\GSAHosts"
PART !!Pol_GSAHosts LISTBOX
END PART
END POLICY
POLICY !!Pol_Disallow_Gadgets
EXPLAIN !!Explain_Disallow_Gadgets
VALUENAME disallow_gadgets
VALUEON NUMERIC 1
PART !!Disallow_Only_Non_Builtin_Gadgets CHECKBOX DEFCHECKED
VALUENAME disallow_only_non_builtin_gadgets
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END PART
END POLICY
POLICY !!Pol_Gadget_Whitelist
EXPLAIN !!Explain_Gadget_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\gadget_whitelist"
PART !!Pol_Gadget_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Pol_Gadget_Install_Confirmation_Whitelist
EXPLAIN !!Explain_Gadget_Install_Confirmation_Whitelist
KEYNAME "Software\Policies\Google\Google Desktop\Enterprise\install_confirmation_whitelist"
PART !!Pol_Gadget_Install_Confirmation_Whitelist LISTBOX
END PART
END POLICY
POLICY !!Pol_Alternate_User_Data_Dir
EXPLAIN !!Explain_Alternate_User_Data_Dir
PART !!Pol_Alternate_User_Data_Dir EDITTEXT
VALUENAME alternate_user_data_dir
END PART
END POLICY
POLICY !!Pol_MaxAllowedOutlookConnections
EXPLAIN !!Explain_MaxAllowedOutlookConnections
PART !!Pol_MaxAllowedOutlookConnections NUMERIC
VALUENAME max_allowed_outlook_connections
MIN 1 MAX 65535 DEFAULT 400 SPIN 1
END PART
END POLICY
POLICY !!Pol_DisallowSsdService
EXPLAIN !!Explain_DisallowSsdService
VALUENAME disallow_ssd_service
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_DisallowSsdOutbound
EXPLAIN !!Explain_DisallowSsdOutbound
VALUENAME disallow_ssd_outbound
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_Disallow_Store_Gadget_Service
EXPLAIN !!Explain_Disallow_Store_Gadget_Service
VALUENAME disallow_store_gadget_service
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_MaxExchangeIndexingRate
EXPLAIN !!Explain_MaxExchangeIndexingRate
PART !!Pol_MaxExchangeIndexingRate NUMERIC
VALUENAME max_exchange_indexing_rate
MIN 1 MAX 1000 DEFAULT 60 SPIN 1
END PART
END POLICY
POLICY !!Pol_EnableSafeweb
EXPLAIN !!Explain_Safeweb
VALUENAME safe_browsing
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY ; Enterprise
END CATEGORY ; GoogleDesktopSearch
END CATEGORY ; Google
;------------------------------------------------------------------------------
[strings]
Cat_Google="Google"
Cat_GoogleDesktopSearch="Google Desktop"
;------------------------------------------------------------------------------
; Preferences
;------------------------------------------------------------------------------
Cat_Preferences="Preferences"
Explain_Preferences="Controls Google Desktop preferences"
Cat_IndexAndCaptureControl="Indexing and Capture Control"
Explain_IndexAndCaptureControl="Controls what files, web pages, and other content will be indexed by Google Desktop."
Blacklist_Email="Prevent indexing of email"
Explain_Blacklist_Email="Enabling this policy will prevent Google Desktop from indexing emails.\n\nIf this policy is not configured, the user can choose whether or not to index emails."
Blacklist_Gmail="Prevent indexing of Gmail"
Explain_Blacklist_Gmail="Enabling this policy prevents Google Desktop from indexing Gmail messages.\n\nThis policy is in effect only when the policy "Prevent indexing of email" is disabled. When that policy is enabled, all email indexing is disabled, including Gmail indexing.\n\nIf both this policy and "Prevent indexing of email" are disabled or not configured, a user can choose whether or not to index Gmail messages."
Blacklist_WebHistory="Prevent indexing of web pages"
Explain_Blacklist_WebHistory="Enabling this policy will prevent Google Desktop from indexing web pages.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index web pages."
Blacklist_Text="Prevent indexing of text files"
Explain_Blacklist_Text="Enabling this policy will prevent Google Desktop from indexing text files.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index text files."
Blacklist_Media="Prevent indexing of media files"
Explain_Blacklist_Media="Enabling this policy will prevent Google Desktop from indexing media files.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index media files."
Blacklist_Contact="Prevent indexing of contacts"
Explain_Blacklist_Contact="Enabling this policy will prevent Google Desktop from indexing contacts.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index contacts."
Blacklist_Calendar="Prevent indexing of calendar entries"
Explain_Blacklist_Calendar="Enabling this policy will prevent Google Desktop from indexing calendar entries.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index calendar entries."
Blacklist_Task="Prevent indexing of tasks"
Explain_Blacklist_Task="Enabling this policy will prevent Google Desktop from indexing tasks.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index tasks."
Blacklist_Note="Prevent indexing of notes"
Explain_Blacklist_Note="Enabling this policy will prevent Google Desktop from indexing notes.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index notes."
Blacklist_Journal="Prevent indexing of journal entries"
Explain_Blacklist_Journal="Enabling this policy will prevent Google Desktop from indexing journal entries.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index journal entries."
Blacklist_Word="Prevent indexing of Word documents"
Explain_Blacklist_Word="Enabling this policy will prevent Google Desktop from indexing Word documents.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index Word documents."
Blacklist_Excel="Prevent indexing of Excel documents"
Explain_Blacklist_Excel="Enabling this policy will prevent Google Desktop from indexing Excel documents.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index Excel documents."
Blacklist_Powerpoint="Prevent indexing of PowerPoint documents"
Explain_Blacklist_Powerpoint="Enabling this policy will prevent Google Desktop from indexing PowerPoint documents.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index PowerPoint documents."
Blacklist_PDF="Prevent indexing of PDF documents"
Explain_Blacklist_PDF="Enabling this policy will prevent Google Desktop from indexing PDF documents.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index PDF documents."
Blacklist_ZIP="Prevent indexing of ZIP files"
Explain_Blacklist_ZIP="Enabling this policy will prevent Google Desktop from indexing ZIP files.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index ZIP files."
Blacklist_HTTPS="Prevent indexing of secure web pages"
Explain_Blacklist_HTTPS="Enabling this policy will prevent Google Desktop from indexing secure web pages (pages with HTTPS in the URL).\n\nIf this policy is disabled or not configured, the user can choose whether or not to index secure web pages."
Blacklist_URI_Contains="Prevent indexing of specific web sites and folders"
Explain_Blacklist_URI_Contains="This policy allows you to prevent Google Desktop from indexing specific websites or folders. If an item's URL or path name contains any of these specified strings, it will not be indexed. These restrictions will be applied in addition to any websites or folders that the user has specified.\n\nThis policy has no effect when disabled or not configured."
Blacklist_Chat="Prevent indexing of IM chats"
Explain_Blacklist_Chat="Enabling this policy will prevent Google Desktop from indexing IM chat conversations.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index IM chat conversations."
Blacklist_PasswordProtectedOffice="Prevent indexing of password-protected Office documents (Word, Excel)"
Explain_Blacklist_PasswordProtectedOffice="Enabling this policy will prevent Google Desktop from indexing password-protected office documents.\n\nIf this policy is disabled or not configured, the user can choose whether or not to index password-protected office documents."
Blacklist_Extensions="Prevent indexing of specific file extensions"
Explain_Blacklist_Extensions="This policy allows you to prevent Google Desktop from indexing files with specific extensions. Enter a list of file extensions, separated by commas, that you wish to exclude from indexing.\n\nThis policy has no effect when disabled or not configured."
Pol_Disallow_UserSearchLocations="Disallow adding search locations for indexing"
Explain_Disallow_UserSearchLocations="Enabling this policy will prevent the user from specifying additional drives or networked folders to be indexed by Google Desktop.\n\nIf this policy is disabled or not configured, users may specify additional drives and networked folders to be indexed."
Pol_Search_Location_Whitelist="Allow indexing of specific folders"
Explain_Search_Location_Whitelist="This policy allows you to add additional drives and networked folders to index."
Search_Locations_Whitelist="Search these locations"
Email_Retention="Only retain emails that are less than x days old"
Explain_Email_Retention="This policy allows you to configure Google Desktop to only retain emails that are less than the specified number of days old in the index. Enter the number of days to retain emails for\n\nThis policy has no effect when disabled or not configured."
Email_Retention_Edit="Number of days to retain emails"
Webpage_Retention="Only retain webpages that are less than x days old"
Explain_Webpage_Retention="This policy allows you to configure Google Desktop to only retain webpages that are less than the specified number of days old in the index. Enter the number of days to retain webpages for\n\nThis policy has no effect when disabled or not configured."
Webpage_Retention_Edit="Number of days to retain webpages"
File_Retention="Only retain files that are less than x days old"
Explain_File_Retention="This policy allows you to configure Google Desktop to only retain files that are less than the specified number of days old in the index. Enter the number of days to retain files for\n\nThis policy has no effect when disabled or not configured."
File_Retention_Edit="Number of days to retain files"
IM_Retention="Only retain IM that are less than x days old"
Explain_IM_Retention="This policy allows you to configure Google Desktop to only retain IM that are less than the specified number of days old in the index. Enter the number of days to retain IM for\n\nThis policy has no effect when disabled or not configured."
IM_Retention_Edit="Number of days to retain IM"
Pol_Remove_Deleted_Items="Remove deleted items from the index."
Explain_Remove_Deleted_Items="Enabling this policy will remove all deleted items from the index and cache. Any items that are deleted will no longer be searchable."
Pol_Allow_Simultaneous_Indexing="Allow historical indexing for multiple users simultaneously."
Explain_Allow_Simultaneous_Indexing="Enabling this policy will allow a computer to generate first-time indexes for multiple users simultaneously. \n\nIf this policy is disabled or not configured, historical indexing will happen only for the logged-in user that was connected last; historical indexing for any other logged-in user will happen the next time that other user connects."
Pol_TurnOffAdvancedFeatures="Turn off Advanced Features options"
Explain_TurnOffAdvancedFeatures="Enabling this policy will prevent Google Desktop from sending Advanced Features data to Google (for either improvements or personalization), and users won't be able to change these options. Enabling this policy also prevents older versions of Google Desktop from sending data.\n\nIf this policy is disabled or not configured and the user has a pre-5.5 version of Google Desktop, the user can choose whether or not to enable sending data to Google. If the user has version 5.5 or later, the 'Turn off Improve Google Desktop option' and 'Do not send personalization info' policies will be used instead."
Pol_TurnOffImproveGd="Turn off Improve Google Desktop option"
Explain_TurnOffImproveGd="Enabling this policy will prevent Google Desktop from sending improvement data, including crash reports and anonymous usage data, to Google.\n\nIf this policy is disabled, improvement data will be sent to Google and the user won't be able to change the option.\n\nIf this policy is not configured, the user can choose whether or not to enable the Improve Google Desktop option.\n\nNote that this policy applies only to version 5.5 or later and doesn't affect previous versions of Google Desktop.\n\nAlso note that this policy can be overridden by the 'Turn off Advanced Features options' policy."
Pol_NoPersonalizationInfo="Do not send personalization info"
Explain_NoPersonalizationInfo="Enabling this policy will prevent Google Desktop from displaying personalized content, such as news that reflects the user's past interest in articles. Personalized content is derived from anonymous usage data sent to Google.\n\nIf this policy is disabled, personalized content will be displayed for all users, and users won't be able to disable this feature.\n\nIf this policy is not configured, users can choose whether or not to enable personalization in each gadget that supports this feature.\n\nNote that this policy applies only to version 5.5 or later and doesn't affect previous versions of Google Desktop.\n\nAlso note that this policy can be overridden by the 'Turn off Advanced Features options' policy."
Pol_OneBoxMode="Turn off Google Web Search Integration"
Explain_OneBoxMode="Enabling this policy will prevent Google Desktop from displaying Desktop Search results in queries to google.com.\n\nIf this policy is disabled or not configured, the user can choose whether or not to include Desktop Search results in queries to google.com."
Pol_EncryptIndex="Encrypt index data"
Explain_EncryptIndex="Enabling this policy will cause Google Desktop to turn on Windows file encryption for the folder containing the Google Desktop index and related user data the next time it is run.\n\nNote that Windows EFS is only available on NTFS volumes. If the user's data is stored on a FAT volume, this policy will have no effect.\n\nThis policy has no effect when disabled or not configured."
Pol_Hyper="Turn off Quick Find"
Explain_Hyper="Enabling this policy will cause Google Desktop to turn off Quick Find feature. Quick Find allows you to see results as you type.\n\nIf this policy is disabled or not configured, the user can choose whether or not to enable it."
Pol_Display_Mode="Choose display option"
Explain_Display_Mode="This policy sets the Google Desktop display option: Sidebar, Deskbar, Floating Deskbar or none.\n\nNote that on 64-bit systems, a setting of Deskbar will be interpreted as Floating Deskbar.\n\nIf this policy is disabled or not configured, the user can choose a display option."
Sidebar="Sidebar"
Deskbar="Deskbar"
FloatingDeskbar="Floating Deskbar"
None="None"
;------------------------------------------------------------------------------
; Enterprise
;------------------------------------------------------------------------------
Cat_Enterprise="Enterprise Integration"
Explain_Enterprise="Controls features specific to Enterprise installations of Google Desktop"
Pol_Autoupdate="Block Auto-update"
Explain_Autoupdate="Enabling this policy prevents Google Desktop from automatically checking for and installing updates from google.com.\n\nIf you enable this policy, you must distribute updates to Google Desktop using Group Policy, SMS, or a similar enterprise software distribution mechanism. You should check http://desktop.google.com/enterprise/ for updates.\n\nIf this policy is disabled or not configured, Google Desktop will periodically check for updates from desktop.google.com."
Pol_AutoupdateAsSystem="Use system proxy settings when auto-updating"
Explain_AutoupdateAsSystem="Enabling this policy makes Google Desktop use the machine-wide proxy settings (as specified using e.g. proxycfg.exe) when performing autoupdates (if enabled).\n\nIf this policy is disabled or not configured, Google Desktop will use the logged-on user's Internet Explorer proxy settings when checking for auto-updates (if enabled)."
Pol_EnterpriseTab="Enterprise search tab"
Explain_EnterpriseTab="This policy allows you to add a search tab for your Google Search Appliance to Google Desktop and google.com web pages.\n\nYou must provide the name of the tab, such as "Intranet", as well as URLs for the search homepage and for retrieving search results. Use [DISP_QUERY] in place of the query term for the search results URL.\n\nSee the administrator's guide for more details."
EnterpriseTabText="Tab name"
EnterpriseTabHomepage="Search homepage URL"
EnterpriseTabHomepageQuery="Check if search homepage supports '&&q=<query>'"
EnterpriseTabResults="Search results URL"
EnterpriseTabResultsQuery="Check if search results page supports '&&q=<query>'"
Pol_GSAHosts="Google Search Appliances"
Explain_GSAHosts="This policy allows you to list any Google Search Appliances in your intranet. When properly configured, Google Desktop will insert Google Desktop results into the results of queries on the Google Search Appliance"
Pol_PolicyUnawareClientProhibitedFlag="Prohibit Policy-Unaware versions"
Explain_PolicyUnawareClientProhibitedFlag="Prohibits installation and execution of versions of Google Desktop that are unaware of group policy.\n\nEnabling this policy will prevent users from installing or running version 1.0 of Google Desktop.\n\nThis policy has no effect when disabled or not configured."
Pol_MinimumAllowedVersion="Minimum allowed version"
Explain_MinimumAllowedVersion="This policy allows you to prevent installation and/or execution of older versions of Google Desktop by specifying the minimum version you wish to allow. When enabling this policy, you should also enable the "Prohibit Policy-Unaware versions" policy to block versions of Google Desktop that did not support group policy.\n\nThis policy has no effect when disabled or not configured."
Pol_MaximumAllowedVersion="Maximum allowed version"
Explain_MaximumAllowedVersion="This policy allows you to prevent installation and/or execution of newer versions of Google Desktop by specifying the maximum version you wish to allow.\n\nThis policy has no effect when disabled or not configured."
Pol_Disallow_Gadgets="Disallow gadgets and indexing plug-ins"
Explain_Disallow_Gadgets="This policy prevents the use of all Google Desktop gadgets and indexing plug-ins. The policy applies to gadgets that are included in the Google Desktop installation package (built-in gadgets), built-in indexing plug-ins (currently only the Lotus Notes plug-in), and to gadgets or indexing plug-ins that a user might want to add later (non-built-in gadgets and indexing plug-ins).\n\nYou can prohibit use of all non-built-in gadgets and indexing plug-ins, but allow use of built-in gadgets and indexing plug-ins. To do so, enable this policy and then select the option "Disallow only non-built-in gadgets and indexing plug-ins."\n\nYou can supersede this policy to allow specified built-in and non-built-in gadgets and indexing plug-ins. To do so, enable this policy and then specify the gadgets and/or indexing plug-ins you want to allow under "Gadget and Plug-in Whitelist.""
Disallow_Only_Non_Builtin_Gadgets="Disallow only non-built-in gadgets and indexing plug-ins"
Pol_Gadget_Whitelist="Gadget and plug-in whitelist"
Explain_Gadget_Whitelist="This policy specifies a list of Google Desktop gadgets and indexing plug-ins that you want to allow, as exceptions to the "Disallow gadgets and indexing plug-ins" policy. This policy is valid only when the "Disallow gadgets and indexing plug-ins" policy is enabled.\n\nFor each gadget or indexing plug-in you wish to allow, add the CLSID or PROGID of the gadget or indexing plug-in (see the administrator's guide for more details).\n\nThis policy has no effect when disabled or not configured."
Pol_Gadget_Install_Confirmation_Whitelist="Allow silent installation of gadgets"
Explain_Gadget_Install_Confirmation_Whitelist="Enabling this policy lets you specify a list of Google Desktop gadgets or indexing plug-ins that can be installed without confirmation from the user.\n\nAdd a gadget or indexing plug-in by placing its class ID (CLSID) or program identifier (PROGID) in the list, surrounded with curly braces ({ }).\n\nThis policy has no effect when disabled or not configured."
Pol_Alternate_User_Data_Dir="Alternate user data directory"
Explain_Alternate_User_Data_Dir="This policy allows you to specify a directory to be used to store user data for Google Desktop (such as index data and cached documents).\n\nYou may use [USER_NAME] or [DOMAIN_NAME] in the path to specify the current user's name or domain. If [USER_NAME] is not specified, the user name will be appended at the end of the path.\n\nThis policy has no effect when disabled or not configured."
Pol_MaxAllowedOutlookConnections="Maximum allowed Outlook connections"
Explain_MaxAllowedOutlookConnections="This policy specifies the maximum number of open connections that Google Desktop maintains with the Exchange server. Google Desktop opens a connection for each email folder that it indexes. If insufficient connections are allowed, Google Desktop cannot index all the user email folders.\n\nThe default value is 400. Because users rarely have as many as 400 email folders, Google Desktop rarely reaches the limit.\n\nIf you set this policy's value above 400, you must also configure the number of open connections between Outlook and the Exchange server. By default, approximately 400 connections are allowed. If Google Desktop uses too many of these connections, Outlook might be unable to access email.\n\nThis policy has no effect when disabled or not configured."
Pol_DisallowSsdService="Disallow sharing and receiving of web history and documents across computers"
Explain_DisallowSsdService="Enabling this policy will prevent Google Desktop from sharing the user's web history and document contents across the user's different Google Desktop installations, and will also prevent it from receiving such shared items from the user's other machines. To allow reception but disallow sharing, use DisallowSsdOutbound.\nThis policy has no effect when disabled or not configured."
Pol_DisallowSsdOutbound="Disallow sharing of web history and documents to user's other computers."
Explain_DisallowSsdOutbound="Enabling this policy will prevent Google Desktop from sending the user's web history and document contents from this machine to the user's other machines. It does not prevent reception of items from the user's other machines; to disallow both, use DisallowSsdService.\nThis policy has no effect when disabled or not configured."
Pol_Disallow_Store_Gadget_Service="Disallow storage of gadget content and settings."
Explain_Disallow_Store_Gadget_Service="Enabling this policy will prevent users from storing their gadget content and settings with Google. Users will be unable to access their gadget content and settings from other computers and all content and settings will be lost if Google Desktop is uninstalled."
Pol_MaxExchangeIndexingRate="Maximum allowed Exchange indexing rate"
Explain_MaxExchangeIndexingRate="This policy allows you to specify the maximum number of emails that are indexed per minute. \n\nThis policy has no effect when disabled or not configured."
Pol_EnableSafeweb="Enable or disable safe browsing"
Explain_Safeweb="Google Desktop safe browsing informs the user whenever they visit any site which is a suspected forgery site or may harm their computer. Enabling this policy turns on safe browsing; disabling the policy turns it off. \n\nIf this policy is not configured, the user can select whether to turn on safe browsing."

87
third_party/libwebrtc/tools/grit/grit/testdata/README.txt поставляемый Normal file
Просмотреть файл

@ -0,0 +1,87 @@
Google Desktop for Enterprise
Copyright (C) 2007 Google Inc.
All Rights Reserved
---------
Contents
---------
This distribution contains the following files:
GoogleDesktopSetup.msi - Installation and setup program
GoogleDesktop.adm - Group Policy administrative template file
AdminGuide.pdf - Google Desktop for Enterprise administrative guide
--------------
Documentation
--------------
Full documentation and installation instructions are in the
administrative guide, and also online at
http://desktop.google.com/enterprise/adminguide.html.
------------------------
IBM Lotus Notes Plug-In
------------------------
The Lotus Notes plug-in is included in the release of Google
Desktop for Enterprise. The IBM Lotus Notes Plug-in for Google
Desktop indexes mail, calendar, task, contact and journal
documents from Notes. Discussion documents including those from
the discussion and team room templates can also be indexed by
selecting an option from the preferences. Once indexed, this data
will be returned in Google Desktop searches. The corresponding
document can be opened in Lotus Notes from the Google Desktop
results page.
Install: The plug-in will install automatically during the Google
Desktop setup process if Lotus Notes is already installed. Lotus
Notes must not be running in order for the install to occur. The
Class ID for this plug-in is {8F42BDFB-33E8-427B-AFDC-A04E046D3F07}.
Preferences: Preferences and selection of databases to index are
set in the 'Google Desktop for Notes' dialog reached through the
'Actions' menu.
Reindexing: Selecting 'Reindex all databases' will index all the
documents in each database again.
Notes Plug-in Known Issues
---------------------------
If the 'Google Desktop for Notes' item is not available from the
Lotus Notes Actions menu, then installation was not successful.
Installation consists of writing one file, notesgdsplugin.dll, to
the Notes application directory and a setting to the notes.ini
configuration file. The most likely cause of an unsuccessful
installation is that the installer was not able to locate the
notes.ini file. Installation will complete if the user closes Notes
and manually adds the following setting to this file on a new line:
AddinMenus=notesgdsplugin.dll
If the notesgdsplugin.dll file is not in the application directory
(e.g., C:\Program Files\Lotus\Notes) after Google Desktop
installation, it is likely that Notes was not installed correctly.
Only local databases can be indexed. If they can be determined,
the user's local mail file and address book will be included in the
list automatically. Mail archives and other databases must be
added with the 'Add' button.
Some users may experience performance issues during the initial
indexing of a database. The 'Perform the initial index of a
database only when I'm idle' option will limit the indexing process
to times when the user is not using the machine. If this does not
alleviate the problem or the user would like to continually index
but just do so more slowly or quickly, the GoogleWaitTime notes.ini
value can be set. Increasing the GoogleWaitTime value will slow
down the indexing process, and lowering the value will speed it up.
A value of zero causes the fastest possible indexing. Removing the
ini parameter altogether returns it to the default (20).
Crashes have been known to occur with certain types of history
bookmarks. If the Notes client seems to crash randomly, try
disabling the 'Index note history' option. If it crashes before
you can get to the preferences, add the following line to your
notes.ini file:
GDSNoIndexHistory=1

45
third_party/libwebrtc/tools/grit/grit/testdata/about.html поставляемый Normal file
Просмотреть файл

@ -0,0 +1,45 @@
[HEADER]
<table cellspacing=0 cellPadding=0 width="100%" border=0><tr bgcolor=#3399cc><td align=middle height=1><img height=1 width=1></td></tr></table>
<table cellspacing=0 cellPadding=1 width="100%" bgcolor=#e8f4f7 border=0><tr><td height=20><font size=+1 color=#000000>&nbsp;<b>[TITLE]</b></font></td></tr></table>
<br><center><span style="line-height:16pt"><font color=#335cec><B>Google Desktop Search: Search your own computer.</B></font></span></center><br>
<table cellspacing=1 cellpadding=0 width=300 align=center border=0>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="outlook.gif" width=16>&nbsp;&nbsp;Outlook Email</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="netscape.gif" width=16>&nbsp;&nbsp;Netscape Mail / Thunderbird</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="oe.gif" width=16>&nbsp;&nbsp;Outlook Express</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="ff.gif" width=16>&nbsp;&nbsp;Netscape / Firefox / Mozilla</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="doc.gif" width=16>&nbsp;&nbsp;Word</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="pdf.gif" width=16>&nbsp;&nbsp;PDF</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="xls.gif" width=16>&nbsp;&nbsp;Excel</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="mus.gif" width=16>&nbsp;&nbsp;Music</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="ppt.gif" width=16>&nbsp;&nbsp;PowerPoint</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="jpg.gif" width=16>&nbsp;&nbsp;Images</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="ie.gif" width=16>&nbsp;&nbsp;Internet Explorer</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="mov.gif" width=16>&nbsp;&nbsp;Video</font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="aim.gif" width=16>&nbsp;&nbsp;AOL Instant Messenger</font></td>
<td nowrap>&nbsp;</td>
<td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="other.gif" width=16>&nbsp;&nbsp;Even more with <a href="http://desktop.google.com/plugins.html">these plug-ins</A></font></td></tr>
<tr><td nowrap><font size=-1><img style="vertical-align:middle" height=16 src="txt.gif" width=16>&nbsp;&nbsp;Text and others</font></td></tr>
</table>
<center>
<p><table cellpadding=1>
<tr><td><a href="http://desktop.google.com/gettingstarted.html?hl=[LANG_CODE]"><B>Getting Started</B></A> - Learn more about using Google Desktop Search</td></tr>
<tr><td><a href="http://desktop.google.com/help.html?hl=[LANG_CODE]"><B>Online Help</B></A> - Up-to-date answers to your questions</td></tr>
<tr><td><a href="[$~PRIVACY~$]"><B>Privacy</B></A> - A few words about privacy and Google Desktop Search</td></tr>
<tr><td><a href="http://desktop.google.com/uninstall.html?hl=[LANG_CODE]"><B>Uninstall</B></A> - How to uninstall Google Desktop Search</td></tr>
<tr><td><a href="http://desktop.google.com/feedback.html?hl=[LANG_CODE]"><B>Submit Feedback</B></A> - Send us your comments and ideas</td></tr>
</table><br><font size=-2>Google Desktop Search [$~BUILDNUMBER~$]</font><br><br>
[FOOTER]

24
third_party/libwebrtc/tools/grit/grit/testdata/android.xml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,24 @@
<!--
Copyright (c) 2012 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<resources>
<!-- A string with placeholder. -->
<string xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2" name="placeholders">
Open <xliff:g id="FILENAME" example="internet.html">%s</xliff:g>?
</string>
<!-- A simple string. -->
<string name="simple">A simple string.</string>
<!-- A string with a comment. -->
<string name="comment">Contains a <!-- ignore this --> comment. </string>
<!-- A second simple string. -->
<string name="simple2"> Another simple string. </string>
<!-- A non-translatable string. -->
<string name="constant" translatable="false">Do not translate me.</string>
</resources>

16
third_party/libwebrtc/tools/grit/grit/testdata/bad_browser.html поставляемый Normal file
Просмотреть файл

@ -0,0 +1,16 @@
<p><b>We're sorry, but we don't seem to be compatible.</b></p>
<p><font size="-1">Our software suggests that you're using a browser incompatible with Google Desktop Search.
Google Desktop Search currently supports the following:</font></p>
<ul><font size="-1">
<li>Microsoft IE 5 and newer (<a href="http://www.microsoft.com/windows/ie/downloads/default.asp">Download</a>)</li>
<li>Mozilla (<a href="http://www.mozilla.org/products/mozilla1.x/">Download</a>)</li>
<li>Mozilla Firefox (<a href="http://www.mozilla.org/products/firefox/">Download</a>)</li>
<li>Netscape 7 and newer (<a href="http://channels.netscape.com/ns/browsers/download.jsp">Download</a>)</li>
</font></ul>
<p><font size="-1">You may <a href="[REDIR]">click here</a> to use your
unsupported browser, though you likely will encounter some areas that don't
work as expected. You need to have Javascript enabled, regardless of the
browser you use.</font>
<p><font size="-1">We hope to expand this list in the near future and announce new
browsers as they become available.

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше