diff --git a/devtools/shared/css-lexer.js b/devtools/shared/css-lexer.js index 9013b63ea572..3f93c07870fb 100644 --- a/devtools/shared/css-lexer.js +++ b/devtools/shared/css-lexer.js @@ -1073,6 +1073,9 @@ Scanner.prototype = { this.ScanString(aToken); if (aToken.mType == eCSSToken_Bad_String) { aToken.mType = eCSSToken_Bad_URL; + // Flag us as having been a Bad_String. + aToken.mInteger2 = 1; + this.ConsumeBadURLRemnants(aToken); return; } } else { @@ -1093,9 +1096,44 @@ Scanner.prototype = { } } else { aToken.mType = eCSSToken_Bad_URL; + if (aToken.mSymbol != 0) { + // Flag us as having been a String, not a Bad_String. + aToken.mInteger2 = 0; + } + this.ConsumeBadURLRemnants(aToken); } }, + ConsumeBadURLRemnants: function (aToken) { + aToken.mInteger = aToken.mIdent.length; + let ch = this.Peek(); + do { + if (ch < 0) { + this.AddEOFCharacters(eEOFCharacters_CloseParen); + break; + } + + if (ch == REVERSE_SOLIDUS && this.GatherEscape(aToken.mIdent, false)) { + // Nothing else needs to be done here for the moment; we've consumed the + // backslash and following escape. + } else { + // We always want to consume this character. + if (IsVertSpace(ch)) { + this.AdvanceLine(); + } else { + this.Advance(); + } + if (ch == 0) { + aToken.mIdent.push(UCS2_REPLACEMENT_CHAR); + } else { + aToken.mIdent.push(ch); + } + } + + ch = this.Peek(); + } while (ch != RIGHT_PARENTHESIS); + }, + /** * Primary scanner entry point. Consume one token and fill in * |aToken| accordingly. Will skip over any number of comments first, diff --git a/devtools/shared/tests/unit/test_csslexer.js b/devtools/shared/tests/unit/test_csslexer.js index 84353def007d..cc55af7e5ffe 100644 --- a/devtools/shared/tests/unit/test_csslexer.js +++ b/devtools/shared/tests/unit/test_csslexer.js @@ -128,8 +128,9 @@ var LEX_TESTS = [ ["url:http://example.com"]], // In CSS Level 3, this is an ordinary URL, not a BAD_URL. 
["url(http://example.com", ["url:http://example.com"]], - // See bug 1153981 to understand why this gets a SYMBOL token. - ["url(http://example.com @", ["bad_url:http://example.com", "symbol:@"]], + // We end up losing the whitespace before the '@' because it's skipped by the + // lexer before we discover we have a BAD_URL token. + ["url(http://example.com @", ["bad_url:http://example.com@"]], ["quo\\ting", ["ident:quoting"]], ["'bad string\n", ["bad_string:bad string", "whitespace"]], ["~=", ["includes"]], diff --git a/dom/webidl/CSSLexer.webidl b/dom/webidl/CSSLexer.webidl index 86fbfcfffe73..09f119d7b844 100644 --- a/dom/webidl/CSSLexer.webidl +++ b/dom/webidl/CSSLexer.webidl @@ -36,8 +36,10 @@ enum CSSTokenType { "bad_string", // A URL. |text| holds the URL. "url", - // A "bad URL". This is a URL that is unterminated at EOF. |text| - // holds the URL. + // A "bad URL". This is a URL that either contains a bad_string or contains + // garbage after the string or unquoted URL text. |text| holds the URL and + // potentially whatever garbage came after it, up to but not including the + // following ')'. "bad_url", // A "symbol" is any one-character symbol. This corresponds to the // DELIM token in the CSS specification. diff --git a/layout/reftests/css-parsing/invalid-url-handling.xhtml b/layout/reftests/css-parsing/invalid-url-handling.xhtml index b5f41a5e43c9..c37734fa767b 100644 --- a/layout/reftests/css-parsing/invalid-url-handling.xhtml +++ b/layout/reftests/css-parsing/invalid-url-handling.xhtml @@ -22,17 +22,19 @@ #two { background-color: green; }