Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1265787 - add javascript CSS lexer to devtools; r=pbro
MozReview-Commit-ID: BgbACHTTMtX

--HG--
extra : transplant_source : %A3iN%27%23%04g%91%06%2CsB%24%ABA%E9%81%D2%EA%8C
Parent: 099c9858d7
Commit: 4383c1a8b1
@@ -112,6 +112,7 @@ devtools/client/webide/**
 devtools/server/**
 !devtools/server/actors/webbrowser.js
 devtools/shared/*.js
+!devtools/shared/css-lexer.js
 devtools/shared/*.jsm
 devtools/shared/apps/**
 devtools/shared/client/**
@@ -126,6 +127,7 @@ devtools/shared/qrcode/**
 devtools/shared/security/**
 devtools/shared/shims/**
 devtools/shared/tests/**
+!devtools/shared/tests/unit/test_csslexer.js
 devtools/shared/touch/**
 devtools/shared/transport/**
 devtools/shared/webconsole/test/**
@@ -12,9 +12,7 @@ const {setTimeout, clearTimeout} =
 const {parseDeclarations} =
       require("devtools/client/shared/css-parsing-utils");
 const promise = require("promise");
-
-loader.lazyServiceGetter(this, "domUtils",
-  "@mozilla.org/inspector/dom-utils;1", "inIDOMUtils");
+const {getCSSLexer} = require("devtools/shared/css-lexer");
 
 const HTML_NS = "http://www.w3.org/1999/xhtml";
 
@@ -87,7 +85,7 @@ function advanceValidate(keyCode, value, insertionPoint) {
   // value. Otherwise it's been inserted in some spot where it has a
   // valid meaning, like a comment or string.
   value = value.slice(0, insertionPoint) + ";" + value.slice(insertionPoint);
-  let lexer = domUtils.getCSSLexer(value);
+  let lexer = getCSSLexer(value);
   while (true) {
     let token = lexer.nextToken();
     if (token.endOffset > insertionPoint) {
@@ -17,6 +17,7 @@
 const {Cc, Ci, Cu} = require("chrome");
 loader.lazyRequireGetter(this, "CSS", "CSS");
 const promise = require("promise");
+const {getCSSLexer} = require("devtools/shared/css-lexer");
 Cu.import("resource://gre/modules/Task.jsm", this);
 loader.lazyGetter(this, "DOMUtils", () => {
   return Cc["@mozilla.org/inspector/dom-utils;1"].getService(Ci.inIDOMUtils);
@@ -51,7 +52,7 @@ const COMMENT_PARSING_HEURISTIC_BYPASS_CHAR = "!";
  * @see CSSToken for details about the returned tokens
  */
 function* cssTokenizer(string) {
-  let lexer = DOMUtils.getCSSLexer(string);
+  let lexer = getCSSLexer(string);
   while (true) {
     let token = lexer.nextToken();
     if (!token) {
@@ -77,14 +78,14 @@ function* cssTokenizer(string) {
  * simpler and better to use the CSSToken offsets, rather than line
  * and column.  Also, this function lexes the entire input string at
  * once, rather than lazily yielding a token stream.  Use
- * |cssTokenizer| or |DOMUtils.getCSSLexer| instead.
+ * |cssTokenizer| or |getCSSLexer| instead.
  *
  * @param{String} string The input string.
  * @return {Array} An array of tokens (@see CSSToken) that have
  *         line and column information.
  */
 function cssTokenizerWithLineColumn(string) {
-  let lexer = DOMUtils.getCSSLexer(string);
+  let lexer = getCSSLexer(string);
   let result = [];
   let prevToken = undefined;
   while (true) {
@@ -292,7 +293,7 @@ function parseDeclarationsInternal(inputString, parseComments,
     throw new Error("empty input string");
   }
 
-  let lexer = DOMUtils.getCSSLexer(inputString);
+  let lexer = getCSSLexer(inputString);
 
   let declarations = [getEmptyDeclaration()];
   let lastProp = declarations[0];
@@ -574,7 +575,7 @@ RuleRewriter.prototype = {
    * to be "lexically safe".
    */
   sanitizePropertyValue: function(text) {
-    let lexer = DOMUtils.getCSSLexer(text);
+    let lexer = getCSSLexer(text);
 
     let result = "";
     let previousOffset = 0;
@@ -7,6 +7,7 @@
 const {Cc, Ci, Cu} = require("chrome");
 const {angleUtils} = require("devtools/shared/css-angle");
 const {colorUtils} = require("devtools/client/shared/css-color");
+const {getCSSLexer} = require("devtools/shared/css-lexer");
 const Services = require("Services");
 const EventEmitter = require("devtools/shared/event-emitter");
 
@@ -163,7 +164,7 @@ OutputParser.prototype = {
     text = text.trim();
     this.parsed.length = 0;
 
-    let tokenStream = DOMUtils.getCSSLexer(text);
+    let tokenStream = getCSSLexer(text);
     let parenDepth = 0;
     let outerMostFunctionTakesColor = false;
 
@@ -489,7 +490,7 @@ OutputParser.prototype = {
    */
   _sanitizeURL: function(url) {
     // Re-lex the URL and add any needed termination characters.
-    let urlTokenizer = DOMUtils.getCSSLexer(url);
+    let urlTokenizer = getCSSLexer(url);
     // Just read until EOF; there will only be a single token.
     while (urlTokenizer.nextToken()) {
       // Nothing.
@@ -32,10 +32,8 @@ const {
   PRESETS,
   DEFAULT_PRESET_CATEGORY
 } = require("devtools/client/shared/widgets/CubicBezierPresets");
+const {getCSSLexer} = require("devtools/shared/css-lexer");
 const {Cc, Ci} = require('chrome');
-loader.lazyGetter(this, "DOMUtils", () => {
-  return Cc["@mozilla.org/inspector/dom-utils;1"].getService(Ci.inIDOMUtils);
-});
 
 /**
  * CubicBezier data structure helper
@@ -829,7 +827,7 @@ function parseTimingFunction(value) {
     return PREDEFINED[value];
   }
 
-  let tokenStream = DOMUtils.getCSSLexer(value);
+  let tokenStream = getCSSLexer(value);
   let getNextToken = () => {
     while (true) {
       let token = tokenStream.nextToken();
@@ -27,8 +27,7 @@
 const {Cc, Cu, Ci} = require("chrome");
 const Services = require("Services");
 const Promise = require("promise");
-const DOMUtils = Cc["@mozilla.org/inspector/dom-utils;1"]
-                 .getService(Ci.inIDOMUtils);
+const {getCSSLexer} = require("devtools/shared/css-lexer");
 
 // Parameters for the XHR request
 // see https://developer.mozilla.org/en-US/docs/MDN/Kuma/API#Document_parameters
@@ -83,7 +82,7 @@ const COMMENT_COLOR = "theme-comment";
 function appendSyntaxHighlightedCSS(cssText, parentElement) {
   let doc = parentElement.ownerDocument;
   let identClass = PROPERTY_NAME_COLOR;
-  let lexer = DOMUtils.getCSSLexer(cssText);
+  let lexer = getCSSLexer(cssText);
 
   /**
    * Create a SPAN node with the given text content and class.
(The diff for the new devtools/shared/css-lexer.js is not shown here because of its large size.)
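The collapsed file is the pure-JavaScript CSS lexer itself. As the call sites in this commit and the cross-check test below exercise it, the module exports a getCSSLexer(input) factory whose lexers mirror the inIDOMUtils.getCSSLexer API: lineNumber and columnNumber properties, a nextToken() method that returns CSSToken objects (null at EOF), and performEOFFixup(). A minimal consumer, sketched from those call sites (the input string here is illustrative only):

    const {getCSSLexer} = require("devtools/shared/css-lexer");

    let lexer = getCSSLexer("color: rgb(1, 2, 3);");
    let token;
    while ((token = lexer.nextToken())) {
      // CSSToken fields used throughout this commit: tokenType, text,
      // startOffset, endOffset, and number/isInteger for numeric tokens.
      console.log(token.tokenType, token.startOffset, token.endOffset);
    }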
@@ -49,6 +49,8 @@ const { getRootBindingParent } = require("devtools/shared/layout/utils");
 // on the worker thread, where Cu is not available.
 loader.lazyRequireGetter(this, "CSS", "CSS");
 
+loader.lazyRequireGetter(this, "CSSLexer", "devtools/shared/css-lexer");
+
 function CssLogic() {
   // The cache of examined CSS properties.
   this._propertyInfos = {};
@@ -992,7 +994,7 @@ CssLogic.prettifyCSS = function(text, ruleCount) {
   // minified file.
   let indent = "";
   let indentLevel = 0;
-  let tokens = domUtils.getCSSLexer(text);
+  let tokens = CSSLexer.getCSSLexer(text);
   let result = "";
   let pushbackToken = undefined;
 
@@ -41,6 +41,7 @@ DevToolsModules(
     'async-utils.js',
     'content-observer.js',
     'css-angle.js',
+    'css-lexer.js',
    'deprecated-sync-thenables.js',
     'DevToolsUtils.js',
     'event-emitter.js',
@@ -0,0 +1,242 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+// This file is a copy of layout/style/test/test_csslexer.js, modified
+// to use both our pure-JS lexer and the DOMUtils lexer for
+// cross-checking.
+
+"use strict";
+
+const jsLexer = require("devtools/shared/css-lexer");
+const domutils = Components.classes["@mozilla.org/inspector/dom-utils;1"]
+                 .getService(Components.interfaces.inIDOMUtils);
+
+// An object that acts like a CSSLexer but verifies that the DOM lexer
+// and the JS lexer do the same thing.
+function DoubleLexer(input) {
+  do_print("DoubleLexer input: " + input);
+  this.domLexer = domutils.getCSSLexer(input);
+  this.jsLexer = jsLexer.getCSSLexer(input);
+}
+
+DoubleLexer.prototype = {
+  checkState: function () {
+    equal(this.domLexer.lineNumber, this.jsLexer.lineNumber,
+          "check line number");
+    equal(this.domLexer.columnNumber, this.jsLexer.columnNumber,
+          "check column number");
+  },
+
+  get lineNumber() {
+    return this.domLexer.lineNumber;
+  },
+
+  get columnNumber() {
+    return this.domLexer.columnNumber;
+  },
+
+  performEOFFixup: function (inputString, preserveBackslash) {
+    let d = this.domLexer.performEOFFixup(inputString, preserveBackslash);
+    let j = this.jsLexer.performEOFFixup(inputString, preserveBackslash);
+
+    equal(d, j);
+    return d;
+  },
+
+  mungeNumber: function (token) {
+    if (token && (token.tokenType === "number" ||
+                  token.tokenType === "percentage") &&
+        !token.isInteger) {
+      // The JS lexer does its computations in double, but the
+      // platform lexer does its computations in float.  Account for
+      // this discrepancy in a way that's sufficient for this test.
+      // See https://bugzilla.mozilla.org/show_bug.cgi?id=1163047
+      token.number = parseFloat(token.number.toPrecision(8));
+    }
+  },
+
+  nextToken: function () {
+    // Check state both before and after.
+    this.checkState();
+
+    let d = this.domLexer.nextToken();
+    let j = this.jsLexer.nextToken();
+
+    this.mungeNumber(d);
+    this.mungeNumber(j);
+
+    deepEqual(d, j);
+
+    this.checkState();
+
+    return d;
+  }
+};
+
+function test_lexer(cssText, tokenTypes) {
+  let lexer = new DoubleLexer(cssText);
+  let reconstructed = "";
+  let lastTokenEnd = 0;
+  let i = 0;
+  while (true) {
+    let token = lexer.nextToken();
+    if (!token) {
+      break;
+    }
+    let combined = token.tokenType;
+    if (token.text) {
+      combined += ":" + token.text;
+    }
+    equal(combined, tokenTypes[i]);
+    ok(token.endOffset > token.startOffset);
+    equal(token.startOffset, lastTokenEnd);
+    lastTokenEnd = token.endOffset;
+    reconstructed += cssText.substring(token.startOffset, token.endOffset);
+    ++i;
+  }
+  // Ensure that we saw the correct number of tokens.
+  equal(i, tokenTypes.length);
+  // Ensure that the reported offsets cover all the text.
+  equal(reconstructed, cssText);
+}
+
+var LEX_TESTS = [
+  ["simple", ["ident:simple"]],
+  ["simple: { hi; }",
+   ["ident:simple", "symbol::",
+    "whitespace", "symbol:{",
+    "whitespace", "ident:hi",
+    "symbol:;", "whitespace",
+    "symbol:}"]],
+  ["/* whatever */", ["comment"]],
+  ["'string'", ["string:string"]],
+  ['"string"', ["string:string"]],
+  ["rgb(1,2,3)", ["function:rgb", "number",
+                  "symbol:,", "number",
+                  "symbol:,", "number",
+                  "symbol:)"]],
+  ["@media", ["at:media"]],
+  ["#hibob", ["id:hibob"]],
+  ["#123", ["hash:123"]],
+  ["23px", ["dimension:px"]],
+  ["23%", ["percentage"]],
+  ["url(http://example.com)", ["url:http://example.com"]],
+  ["url('http://example.com')", ["url:http://example.com"]],
+  ["url( 'http://example.com' )",
+   ["url:http://example.com"]],
+  // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
+  ["url(http://example.com", ["url:http://example.com"]],
+  // See bug 1153981 to understand why this gets a SYMBOL token.
+  ["url(http://example.com @", ["bad_url:http://example.com", "symbol:@"]],
+  ["quo\\ting", ["ident:quoting"]],
+  ["'bad string\n", ["bad_string:bad string", "whitespace"]],
+  ["~=", ["includes"]],
+  ["|=", ["dashmatch"]],
+  ["^=", ["beginsmatch"]],
+  ["$=", ["endsmatch"]],
+  ["*=", ["containsmatch"]],
+
+  // URANGE may be on the way out, and it isn't used by devutils, so
+  // let's skip it.
+
+  ["<!-- html comment -->", ["htmlcomment", "whitespace", "ident:html",
+                             "whitespace", "ident:comment", "whitespace",
+                             "htmlcomment"]],
+
+  // earlier versions of CSS had "bad comment" tokens, but in level 3,
+  // unterminated comments are just comments.
+  ["/* bad comment", ["comment"]]
+];
+
+function test_lexer_linecol(cssText, locations) {
+  let lexer = new DoubleLexer(cssText);
+  let i = 0;
+  while (true) {
+    let token = lexer.nextToken();
+    let startLine = lexer.lineNumber;
+    let startColumn = lexer.columnNumber;
+
+    // We do this in a bit of a funny way so that we can also test the
+    // location of the EOF.
+    let combined = ":" + startLine + ":" + startColumn;
+    if (token) {
+      combined = token.tokenType + combined;
+    }
+
+    equal(combined, locations[i]);
+    ++i;
+
+    if (!token) {
+      break;
+    }
+  }
+  // Ensure that we saw the correct number of tokens.
+  equal(i, locations.length);
+}
+
+function test_lexer_eofchar(cssText, argText, expectedAppend,
+                            expectedNoAppend) {
+  let lexer = new DoubleLexer(cssText);
+  while (lexer.nextToken()) {
+    // Nothing.
+  }
+
+  do_print("EOF char test, input = " + cssText);
+
+  let result = lexer.performEOFFixup(argText, true);
+  equal(result, expectedAppend);
+
+  result = lexer.performEOFFixup(argText, false);
+  equal(result, expectedNoAppend);
+}
+
+var LINECOL_TESTS = [
+  ["simple", ["ident:0:0", ":0:6"]],
+  ["\n    stuff", ["whitespace:0:0", "ident:1:4", ":1:9"]],
+  ['"string with \\\nnewline"    \r\n', ["string:0:0", "whitespace:1:8",
+                                         ":2:0"]]
+];
+
+var EOFCHAR_TESTS = [
+  ["hello", "hello"],
+  ["hello \\", "hello \\\\", "hello \\\uFFFD"],
+  ["'hello", "'hello'"],
+  ["\"hello", "\"hello\""],
+  ["'hello\\", "'hello\\\\'", "'hello'"],
+  ["\"hello\\", "\"hello\\\\\"", "\"hello\""],
+  ["/*hello", "/*hello*/"],
+  ["/*hello*", "/*hello*/"],
+  ["/*hello\\", "/*hello\\*/"],
+  ["url(hello", "url(hello)"],
+  ["url('hello", "url('hello')"],
+  ["url(\"hello", "url(\"hello\")"],
+  ["url(hello\\", "url(hello\\\\)", "url(hello\\\uFFFD)"],
+  ["url('hello\\", "url('hello\\\\')", "url('hello')"],
+  ["url(\"hello\\", "url(\"hello\\\\\")", "url(\"hello\")"],
+];
+
+function run_test() {
+  let text, result;
+  for ([text, result] of LEX_TESTS) {
+    test_lexer(text, result);
+  }
+
+  for ([text, result] of LINECOL_TESTS) {
+    test_lexer_linecol(text, result);
+  }
+
+  let expectedAppend, expectedNoAppend;
+  for ([text, expectedAppend, expectedNoAppend] of EOFCHAR_TESTS) {
+    if (!expectedNoAppend) {
+      expectedNoAppend = expectedAppend;
+    }
+    test_lexer_eofchar(text, text, expectedAppend, expectedNoAppend);
+  }
+
+  // Ensure that passing a different inputString to performEOFFixup
+  // doesn't cause an assertion trying to strip a backslash from the
+  // end of an empty string.
+  test_lexer_eofchar("'\\", "", "\\'", "'");
+}
@@ -8,6 +8,7 @@ support-files =
   exposeLoader.js
 
 [test_assert.js]
+[test_csslexer.js]
 [test_fetch-chrome.js]
 [test_fetch-file.js]
 [test_fetch-http.js]
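With the manifest entry above, the new test runs under the standard xpcshell harness; assuming a mozilla-central checkout with this patch applied, it can be run with something like:

    ./mach xpcshell-test devtools/shared/tests/unit/test_csslexer.js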