No bug - Revendor rust dependencies

Servo VCS Sync 2017-09-02 00:17:56 +00:00
Parent 08057f5e1e
Commit ed507e14c2
11 changed files with 415 additions and 274 deletions

third_party/rust/cssparser/.cargo-checksum.json (vendored)

@@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"a52213c38e6ff8fcbf4c2f632c6d78521a9a8b9cfcfdfa34339544649d486076","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"b847b80097015cb7d0f4be67c0d8b2f6b82006be865917ff14a96b484760d460","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"77c0852be9ba7682f4e325a09ebac03ce25aafec30142eb10937b77651a29d67","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"3a315b7600e80b577c5d04f215038c55ae1c9e5a2c70c6587850cd7fc1be6ae4","src/rules_and_declarations.rs":"44e47663aaa8a5ff167393b91337e377e5a4fcbef64b227028780b6d22879f69","src/serializer.rs":"843c9d01de00523851a4c40f791c64e3b00325426cb38f897e4a2ddb4cfa6de8","src/size_of_tests.rs":"a28664d44797519119d659eaf7e84e1789ef97e9e2c2d36630eb9f226c0cc0a6","src/tests.rs":"c07f5d8464217b1650f7ee8911b90ef67947876305be215d1e666a20a793dbfb","src/tokenizer.rs":"63640e6a2d875e8afda9dea6034b8c57db9b5877c3c491a97fee1c6ec223b75d","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"b15b69a36fd6c23052045990fcfe68e8cad505d4d92d568a52eac041982699ee","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"422a2e934b06a2cca7beef7afeab42bdca81a73eb27afcbdb3d2a98db892590b","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"37aec41c81021cd4cc8f34491ee75de2e8340feada2d0096b107597fc4ac485d","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"9f147bc14e25fd4789e390ad0c9d2270188002a3a4785c150f39ef278b291259","src/rules_and_declarations.rs":"962f59aab8030b0d1202859ff841ed6254ce4bd4159eee5e915ccdf4b802f4d5","src/serializer.rs":"9e0c821b1ee6d35ca0632f7f01209f7174eef053b69a5c25b7145e1e8e667efe","src/size_of_tests.rs":"a28664d44797519119d659eaf7e84e1789ef97e9e2c2d36630eb9f226c0cc0a6","src/tests.rs":"ff00ab136330a5798d2b28375069f03f6019cdb1c8b38b407d24120e106a9f1e","src/tokenizer.rs":"fb2e3036f9a20969a1feaf2da293de224efb092f7abb4c01e7d5aaf981c29826","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"}

third_party/rust/cssparser/Cargo.toml (vendored, 31 changes)

@@ -12,7 +12,7 @@
[package]
name = "cssparser"
version = "0.19.5"
version = "0.20.0"
authors = ["Simon Sapin <simon.sapin@exyr.org>"]
build = "build.rs"
exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"]
@@ -22,15 +22,6 @@ readme = "README.md"
keywords = ["css", "syntax", "parser"]
license = "MPL-2.0"
repository = "https://github.com/servo/rust-cssparser"
[dependencies.cssparser-macros]
version = "0.3"
[dependencies.procedural-masquerade]
version = "0.1"
[dependencies.matches]
version = "0.1"
[dependencies.dtoa-short]
version = "0.3"
@@ -47,14 +38,26 @@ optional = true
[dependencies.heapsize]
version = ">= 0.3, < 0.5"
optional = true
[dev-dependencies.difference]
version = "1.0"
[dev-dependencies.encoding_rs]
version = "0.5"
[dependencies.procedural-masquerade]
version = "0.1"
[dependencies.itoa]
version = "0.3"
[dependencies.matches]
version = "0.1"
[dependencies.cssparser-macros]
version = "0.3"
[dev-dependencies.rustc-serialize]
version = "0.3"
[dev-dependencies.encoding_rs]
version = "0.7"
[dev-dependencies.difference]
version = "1.0"
[build-dependencies.syn]
version = "0.11"

third_party/rust/cssparser/src/color.rs (vendored, 28 changes)

@@ -100,18 +100,26 @@ impl ToCss for RGBA {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
// Try first with two decimal places, then with three.
let mut rounded_alpha = (self.alpha_f32() * 100.).round() / 100.;
if clamp_unit_f32(rounded_alpha) != self.alpha {
rounded_alpha = (self.alpha_f32() * 1000.).round() / 1000.;
}
let serialize_alpha = self.alpha != 255;
if self.alpha == 255 {
write!(dest, "rgb({}, {}, {})", self.red, self.green, self.blue)
} else {
write!(dest, "rgba({}, {}, {}, {})",
self.red, self.green, self.blue, rounded_alpha)
dest.write_str(if serialize_alpha { "rgba(" } else { "rgb(" })?;
self.red.to_css(dest)?;
dest.write_str(", ")?;
self.green.to_css(dest)?;
dest.write_str(", ")?;
self.blue.to_css(dest)?;
if serialize_alpha {
dest.write_str(", ")?;
// Try first with two decimal places, then with three.
let mut rounded_alpha = (self.alpha_f32() * 100.).round() / 100.;
if clamp_unit_f32(rounded_alpha) != self.alpha {
rounded_alpha = (self.alpha_f32() * 1000.).round() / 1000.;
}
rounded_alpha.to_css(dest)?;
}
dest.write_char(')')
}
}
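The rewritten serializer keeps the old rounding strategy: pick the shortest decimal form of the alpha that still round-trips through the u8 storage. A standalone sketch of that strategy, with clamp_unit_f32 approximated here for illustration (the crate's own helper lives elsewhere in color.rs):

fn clamp_unit_f32(val: f32) -> u8 {
    // Approximation of the crate's helper: scale a 0.0..=1.0 value to 0..=255.
    (val * 255.).round().max(0.).min(255.) as u8
}

fn rounded_alpha(alpha: u8) -> f32 {
    // Try first with two decimal places, then with three.
    let alpha_f32 = alpha as f32 / 255.;
    let rounded = (alpha_f32 * 100.).round() / 100.;
    if clamp_unit_f32(rounded) == alpha {
        rounded
    } else {
        (alpha_f32 * 1000.).round() / 1000.
    }
}

fn main() {
    assert_eq!(rounded_alpha(128), 0.5);  // 0.50 round-trips back to 128
    assert_eq!(rounded_alpha(1), 0.004);  // 0.00 would not round-trip to 1, so use three decimals
}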

third_party/rust/cssparser/src/lib.rs (vendored, 1 change)

@@ -69,6 +69,7 @@ fn parse_border_spacing(_context: &ParserContext, input: &mut Parser)
#![recursion_limit="200"] // For color::parse_color_keyword
extern crate dtoa_short;
extern crate itoa;
#[macro_use] extern crate cssparser_macros;
#[macro_use] extern crate matches;
#[macro_use] extern crate procedural_masquerade;

third_party/rust/cssparser/src/parser.rs (vendored, 22 changes)

@@ -57,6 +57,7 @@ pub enum BasicParseError<'a> {
}
impl<'a, T> From<BasicParseError<'a>> for ParseError<'a, T> {
#[inline]
fn from(this: BasicParseError<'a>) -> ParseError<'a, T> {
ParseError::Basic(this)
}
@@ -200,16 +201,19 @@ mod ClosingDelimiter {
impl BitOr<Delimiters> for Delimiters {
type Output = Delimiters;
#[inline]
fn bitor(self, other: Delimiters) -> Delimiters {
Delimiters { bits: self.bits | other.bits }
}
}
impl Delimiters {
#[inline]
fn contains(self, other: Delimiters) -> bool {
(self.bits & other.bits) != 0
}
#[inline]
fn from_byte(byte: Option<u8>) -> Delimiters {
match byte {
Some(b';') => Delimiter::Semicolon,
@@ -349,20 +353,6 @@ impl<'i: 't, 't> Parser<'i, 't> {
self.input.tokenizer.seen_var_functions()
}
/// Start looking for viewport percentage lengths. (See the `seen_viewport_percentages`
/// method.)
#[inline]
pub fn look_for_viewport_percentages(&mut self) {
self.input.tokenizer.look_for_viewport_percentages()
}
/// Return whether a `vh`, `vw`, `vmin`, or `vmax` dimension has been seen by the tokenizer
/// since `look_for_viewport_percentages` was called, and stop looking.
#[inline]
pub fn seen_viewport_percentages(&mut self) -> bool {
self.input.tokenizer.seen_viewport_percentages()
}
/// Execute the given closure, passing it the parser.
/// If the result (returned unchanged) is `Err`,
/// the internal state of the parser (including position within the input)
@@ -441,7 +431,6 @@ impl<'i: 't, 't> Parser<'i, 't> {
if cached_token.start_position == token_start_position => {
self.input.tokenizer.reset(&cached_token.end_state);
match cached_token.token {
Token::Dimension { ref unit, .. } => self.input.tokenizer.see_dimension(unit),
Token::Function(ref name) => self.input.tokenizer.see_function(name),
_ => {}
}
@@ -822,6 +811,7 @@ pub fn parse_until_after<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
let next_byte = (parser.input.tokenizer).next_byte();
if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) {
debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
// We know this byte is ASCII.
(parser.input.tokenizer).advance(1);
if next_byte == Some(b'{') {
consume_until_end_of_block(BlockType::CurlyBracket, &mut parser.input.tokenizer);
@@ -860,6 +850,8 @@ pub fn parse_nested_block<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, pars
result
}
#[inline(never)]
#[cold]
fn consume_until_end_of_block(block_type: BlockType, tokenizer: &mut Tokenizer) {
let mut stack = SmallVec::<[BlockType; 16]>::new();
stack.push(block_type);

third_party/rust/cssparser/src/rules_and_declarations.rs (vendored)

@@ -23,24 +23,17 @@ pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicPa
/// The return value for `AtRuleParser::parse_prelude`.
/// Indicates whether the at-rule is expected to have a `{ /* ... */ }` block
/// or end with a `;` semicolon.
pub enum AtRuleType<P, R> {
pub enum AtRuleType<P, PB> {
/// The at-rule is expected to end with a `;` semicolon. Example: `@import`.
///
/// The value is the finished representation of an at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
WithoutBlock(R),
/// The value is the representation of all data of the rule, which will be
/// handled by `rule_without_block`.
WithoutBlock(P),
/// The at-rule is expected to have a `{ /* ... */ }` block. Example: `@media`
///
/// The value is the representation of the "prelude" part of the rule.
WithBlock(P),
/// The at-rule may either have a block or end with a semicolon.
///
/// This is mostly for testing. As of this writing no real CSS at-rule behaves like this.
///
/// The value is the representation of the "prelude" part of the rule.
OptionalBlock(P),
WithBlock(PB),
}
/// A trait to provide various parsing of declaration values.
@@ -85,8 +78,11 @@ pub trait DeclarationParser<'i> {
/// so that `impl AtRuleParser<(), ()> for ... {}` can be used
/// for using `DeclarationListParser` to parse a declarations list with only qualified rules.
pub trait AtRuleParser<'i> {
/// The intermediate representation of an at-rule prelude.
type Prelude;
/// The intermediate representation of the prelude of an at-rule without a block.
type PreludeNoBlock;
/// The intermediate representation of the prelude of an at-rule with a block.
type PreludeBlock;
/// The finished representation of an at-rule.
type AtRule;
@@ -112,36 +108,39 @@ pub trait AtRuleParser<'i> {
/// that ends wherever the prelude should end.
/// (Before the next semicolon, the next `{`, or the end of the current block.)
fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<AtRuleType<Self::Prelude, Self::AtRule>, ParseError<'i, Self::Error>> {
-> Result<AtRuleType<Self::PreludeNoBlock, Self::PreludeBlock>,
ParseError<'i, Self::Error>> {
let _ = name;
let _ = input;
Err(ParseError::Basic(BasicParseError::AtRuleInvalid(name)))
}
/// End an at-rule which doesn't have a block. Return the finished
/// representation of the at-rule.
///
/// This is only called when `parse_prelude` returned `WithoutBlock`, and
/// either the `;` semicolon indeed follows the prelude, or the parser is at
/// the end of the input.
fn rule_without_block(&mut self, prelude: Self::PreludeNoBlock) -> Self::AtRule {
let _ = prelude;
panic!("The `AtRuleParser::rule_without_block` method must be overriden \
if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`.")
}
/// Parse the content of a `{ /* ... */ }` block for the body of the at-rule.
///
/// Return the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
/// or `Err(())` to ignore the entire at-rule as invalid.
///
/// This is only called when `parse_prelude` returned `WithBlock` or `OptionalBlock`,
/// and a block was indeed found following the prelude.
fn parse_block<'t>(&mut self, prelude: Self::Prelude, input: &mut Parser<'i, 't>)
/// This is only called when `parse_prelude` returned `WithBlock`, and a block
/// was indeed found following the prelude.
fn parse_block<'t>(&mut self, prelude: Self::PreludeBlock, input: &mut Parser<'i, 't>)
-> Result<Self::AtRule, ParseError<'i, Self::Error>> {
let _ = prelude;
let _ = input;
Err(ParseError::Basic(BasicParseError::AtRuleBodyInvalid))
}
/// An `OptionalBlock` prelude was followed by `;`.
///
/// Convert the prelude into the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
fn rule_without_block(&mut self, prelude: Self::Prelude) -> Self::AtRule {
let _ = prelude;
panic!("The `AtRuleParser::rule_without_block` method must be overriden \
if `AtRuleParser::parse_prelude` ever returns `AtRuleType::OptionalBlock`.")
}
}
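To make the reworked trait concrete, here is a minimal hypothetical implementation of the split-prelude API, modeled on the JsonParser changes in tests.rs later in this diff; SimpleAtRuleParser and its String preludes are illustrative, not part of the crate:

use cssparser::{AtRuleParser, AtRuleType, BasicParseError, CowRcStr, ParseError, Parser};

struct SimpleAtRuleParser;

impl<'i> AtRuleParser<'i> for SimpleAtRuleParser {
    type PreludeNoBlock = String; // e.g. the prelude of `@import`
    type PreludeBlock = String;   // e.g. the prelude of `@media`
    type AtRule = String;
    type Error = ();

    fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, _input: &mut Parser<'i, 't>)
                         -> Result<AtRuleType<String, String>, ParseError<'i, ()>> {
        if name.eq_ignore_ascii_case("media") {
            Ok(AtRuleType::WithBlock(name.to_string()))
        } else if name.eq_ignore_ascii_case("import") {
            Ok(AtRuleType::WithoutBlock(name.to_string()))
        } else {
            Err(ParseError::Basic(BasicParseError::AtRuleInvalid(name)))
        }
    }

    // Called when a `WithoutBlock` prelude is followed by `;` or the end of the input.
    fn rule_without_block(&mut self, prelude: String) -> String {
        format!("@{};", prelude)
    }

    // Called when a `WithBlock` prelude is followed by a `{ ... }` block.
    fn parse_block<'t>(&mut self, prelude: String, _input: &mut Parser<'i, 't>)
                       -> Result<String, ParseError<'i, ()>> {
        Ok(format!("@{} {{ ... }}", prelude))
    }
}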
/// A trait to provide various parsing of qualified rules.
@@ -460,9 +459,9 @@ fn parse_at_rule<'i: 't, 't, P, E>(start: &ParserState, name: CowRcStr<'i>,
parser.parse_prelude(name, input)
});
match result {
Ok(AtRuleType::WithoutBlock(rule)) => {
Ok(AtRuleType::WithoutBlock(prelude)) => {
match input.next() {
Ok(&Token::Semicolon) | Err(_) => Ok(rule),
Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude)),
Ok(&Token::CurlyBracketBlock) => Err(PreciseParseError {
error: ParseError::Basic(BasicParseError::UnexpectedToken(Token::CurlyBracketBlock)),
slice: input.slice_from(start.position()),
@@ -495,21 +494,6 @@
Ok(_) => unreachable!()
}
}
Ok(AtRuleType::OptionalBlock(prelude)) => {
match input.next() {
Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude)),
Ok(&Token::CurlyBracketBlock) => {
// FIXME: https://github.com/rust-lang/rust/issues/42508
parse_nested_block::<'i, 't, _, _, _>(input, move |input| parser.parse_block(prelude, input))
.map_err(|e| PreciseParseError {
error: e,
slice: input.slice_from(start.position()),
location: start.source_location(),
})
}
_ => unreachable!()
}
}
Err(error) => {
let end_position = input.position();
match input.next() {

third_party/rust/cssparser/src/serializer.rs (vendored, 93 changes)

@@ -3,8 +3,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dtoa_short::{self, Notation};
use itoa;
use std::ascii::AsciiExt;
use std::fmt::{self, Write};
use std::io;
use std::str;
use super::Token;
@@ -24,23 +26,6 @@ pub trait ToCss {
self.to_css(&mut s).unwrap();
s
}
/// Serialize `self` in CSS syntax and return a result compatible with `std::fmt::Show`.
///
/// Typical usage is, for a `Foo` that implements `ToCss`:
///
/// ```{rust,ignore}
/// use std::fmt;
/// impl fmt::Show for Foo {
/// #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.fmt_to_css(f) }
/// }
/// ```
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn fmt_to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.to_css(dest).map_err(|_| fmt::Error)
}
}
#[inline]
@@ -90,7 +75,7 @@ impl<'a> ToCss for Token<'a> {
serialize_unquoted_url(&**value, dest)?;
dest.write_str(")")?;
},
Token::Delim(value) => write!(dest, "{}", value)?,
Token::Delim(value) => dest.write_char(value)?,
Token::Number { value, int_value, has_sign } => {
write_numeric(value, int_value, has_sign, dest)?
@@ -112,7 +97,11 @@ impl<'a> ToCss for Token<'a> {
},
Token::WhiteSpace(content) => dest.write_str(content)?,
Token::Comment(content) => write!(dest, "/*{}*/", content)?,
Token::Comment(content) => {
dest.write_str("/*")?;
dest.write_str(content)?;
dest.write_str("*/")?
}
Token::Colon => dest.write_str(":")?,
Token::Semicolon => dest.write_str(";")?,
Token::Comma => dest.write_str(",")?,
@@ -143,6 +132,32 @@ impl<'a> ToCss for Token<'a> {
}
}
fn to_hex_byte(value: u8) -> u8 {
match value {
0...9 => value + b'0',
_ => value - 10 + b'a',
}
}
fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
let high = ascii_byte >> 4;
let b3;
let b4;
let bytes = if high > 0 {
let low = ascii_byte & 0x0F;
b4 = [b'\\', to_hex_byte(high), to_hex_byte(low), b' '];
&b4[..]
} else {
b3 = [b'\\', to_hex_byte(ascii_byte), b' '];
&b3[..]
};
dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
}
fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
let bytes = [b'\\', ascii_byte];
dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
}
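A quick sanity check of the two helpers, assuming both are in scope. Note that to_hex_byte emits lowercase hex digits, which is why the expected strings in the unquoted_url_escaping test below change from \7F to \7f:

fn main() {
    let mut s = String::new();
    hex_escape(b'1', &mut s).unwrap();  // 0x31: the same "\31 " the old "\\3{} " format string produced
    hex_escape(0x7F, &mut s).unwrap();  // DEL: "\7f ", lowercase and space-terminated
    char_escape(b'(', &mut s).unwrap(); // printable byte: a plain backslash escape, "\("
    assert_eq!(s, "\\31 \\7f \\(");
}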
/// Write a CSS identifier, escaping characters as necessary.
pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
@@ -161,7 +176,7 @@ pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result whe
value = &value[1..];
}
if let digit @ b'0'...b'9' = value.as_bytes()[0] {
write!(dest, "\\3{} ", digit as char)?;
hex_escape(digit, dest)?;
value = &value[1..];
}
serialize_name(value, dest)
@@ -182,9 +197,9 @@ fn serialize_name<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Writ
if let Some(escaped) = escaped {
dest.write_str(escaped)?;
} else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
write!(dest, "\\{:x} ", b)?;
hex_escape(b, dest)?;
} else {
write!(dest, "\\{}", b as char)?;
char_escape(b, dest)?;
}
chunk_start = i + 1;
}
@@ -202,9 +217,9 @@ fn serialize_unquoted_url<W>(value: &str, dest: &mut W) -> fmt::Result where W:f
};
dest.write_str(&value[chunk_start..i])?;
if hex {
write!(dest, "\\{:X} ", b)?;
hex_escape(b, dest)?;
} else {
write!(dest, "\\{}", b as char)?;
char_escape(b, dest)?;
}
chunk_start = i + 1;
}
@@ -262,7 +277,7 @@ impl<'a, W> fmt::Write for CssStringWriter<'a, W> where W: fmt::Write {
self.inner.write_str(&s[chunk_start..i])?;
match escaped {
Some(x) => self.inner.write_str(x)?,
None => write!(self.inner, "\\{:x} ", b)?,
None => hex_escape(b, self.inner)?,
};
chunk_start = i + 1;
}
@@ -275,7 +290,33 @@ macro_rules! impl_tocss_for_int {
($T: ty) => {
impl<'a> ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}", *self)
struct AssumeUtf8<W: fmt::Write>(W);
impl<W: fmt::Write> io::Write for AssumeUtf8<W> {
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
// Safety: itoa only emits ASCII, which is also well-formed UTF-8.
debug_assert!(buf.is_ascii());
self.0.write_str(unsafe { str::from_utf8_unchecked(buf) })
.map_err(|_| io::ErrorKind::Other.into())
}
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_all(buf)?;
Ok(buf.len())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
match itoa::write(AssumeUtf8(dest), *self) {
Ok(_) => Ok(()),
Err(_) => Err(fmt::Error)
}
}
}
}
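The AssumeUtf8 wrapper above bridges itoa's io::Write-based API to a fmt::Write sink without an intermediate buffer. A self-contained sketch of the same adapter pattern, using a checked rather than assumed UTF-8 conversion; Utf8Adapter is illustrative, not part of the crate:

use std::{fmt, io, str};

struct Utf8Adapter<W: fmt::Write>(W);

impl<W: fmt::Write> io::Write for Utf8Adapter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Validate instead of assuming; the crate can skip this check
        // because itoa only ever emits ASCII.
        let s = str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
        self.0.write_str(s).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> { Ok(()) }
}

fn main() {
    use std::io::Write;
    let mut out = String::new();
    write!(Utf8Adapter(&mut out), "{}", -12345).unwrap();
    assert_eq!(out, "-12345");
}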

third_party/rust/cssparser/src/tests.rs (vendored, 80 changes)

@@ -300,10 +300,10 @@ fn unquoted_url_escaping() {
let serialized = token.to_css_string();
assert_eq!(serialized, "\
url(\
\\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\A \\B \\C \\D \\E \\F \\10 \
\\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1A \\1B \\1C \\1D \\1E \\1F \\20 \
\\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \
\\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \
!\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\
^_`abcdefghijklmnopqrstuvwxyz{|}~\\7F é\
^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\
)\
");
let mut input = ParserInput::new(&serialized);
@@ -745,17 +745,28 @@ impl<'i> DeclarationParser<'i> for JsonParser {
}
impl<'i> AtRuleParser<'i> for JsonParser {
type Prelude = Vec<Json>;
type PreludeNoBlock = Vec<Json>;
type PreludeBlock = Vec<Json>;
type AtRule = Json;
type Error = ();
fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<AtRuleType<Vec<Json>, Json>, ParseError<'i, ()>> {
Ok(AtRuleType::OptionalBlock(vec![
-> Result<AtRuleType<Vec<Json>, Vec<Json>>, ParseError<'i, ()>> {
let prelude = vec![
"at-rule".to_json(),
name.to_json(),
Json::Array(component_values_to_json(input)),
]))
];
match_ignore_ascii_case! { &*name,
"media" | "foo-with-block" => Ok(AtRuleType::WithBlock(prelude)),
"charset" => Err(BasicParseError::AtRuleInvalid(name.clone()).into()),
_ => Ok(AtRuleType::WithoutBlock(prelude)),
}
}
fn rule_without_block(&mut self, mut prelude: Vec<Json>) -> Json {
prelude.push(Json::Null);
Json::Array(prelude)
}
fn parse_block<'t>(&mut self, mut prelude: Vec<Json>, input: &mut Parser<'i, 't>)
@@ -763,11 +774,6 @@ impl<'i> AtRuleParser<'i> for JsonParser {
prelude.push(Json::Array(component_values_to_json(input)));
Ok(Json::Array(prelude))
}
fn rule_without_block(&mut self, mut prelude: Vec<Json>) -> Json {
prelude.push(Json::Null);
Json::Array(prelude)
}
}
impl<'i> QualifiedRuleParser<'i> for JsonParser {
@@ -1049,3 +1055,53 @@ fn roundtrip_percentage_token() {
}
}
}
#[test]
fn utf16_columns() {
// This particular test serves two purposes. First, it checks
// that the column number computations are correct. Second, it
// checks that tokenizer code paths correctly differentiate
// between the different UTF-8 encoding bytes. In particular
// different leader bytes and continuation bytes are treated
// differently, so we make sure to include all lengths in the
// tests, using the string "QΡ✈🆒". Also, remember that because
// the column is in units of UTF-16, the 4-byte sequence results
// in two columns.
let tests = vec![
("", 0),
("ascii", 5),
("/*QΡ✈🆒*/", 9),
("'QΡ✈🆒*'", 8),
("\"\\\"'QΡ✈🆒*'", 11),
("\\Q\\Ρ\\\\🆒", 9),
("QΡ✈🆒", 5),
("QΡ✈🆒\\Q\\Ρ\\\\🆒", 14),
("newline\r\nQΡ✈🆒", 5),
("url(QΡ✈🆒\\Q\\Ρ\\\\🆒)", 19),
("url(QΡ✈🆒)", 10),
("url(\r\nQΡ✈🆒\\Q\\Ρ\\\\🆒)", 15),
("url(\r\nQΡ✈🆒\\Q\\Ρ\\\\🆒", 14),
("url(\r\nQΡ✈🆒\\Q\\Ρ\\\\🆒 x", 16),
("QΡ✈🆒()", 7),
// Test that under/over-flow of current_line_start_position is
// handled properly; see the special case in consume_4byte_intro.
("🆒", 2),
];
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
// Read all tokens.
loop {
match parser.next() {
Err(BasicParseError::EndOfInput) => { break; }
Err(_) => { assert!(false); }
Ok(_) => {}
};
}
// Check the resulting column.
assert_eq!(parser.current_source_location().column, test.1);
}
}
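The invariant these tests pin down can be stated directly: the column of a position is the UTF-16 length of the line prefix before it. A reference implementation of that definition (illustrative, not part of the crate), which the tokenizer's wrapping adjustments compute incrementally:

fn utf16_column(line_prefix: &str) -> u32 {
    line_prefix.chars().map(|c| c.len_utf16() as u32).sum()
}

fn main() {
    assert_eq!(utf16_column("ascii"), 5);
    assert_eq!(utf16_column("QΡ✈🆒"), 5); // 1 + 1 + 1 + 2 UTF-16 units
    assert_eq!(utf16_column("🆒"), 2);    // one 4-byte scalar value, two UTF-16 units
}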

third_party/rust/cssparser/src/tokenizer.rs (vendored, 332 changes)

@@ -44,9 +44,10 @@ pub enum Token<'a> {
/// The value does not include the quotes.
QuotedString(CowRcStr<'a>),
/// A [`<url-token>`](https://drafts.csswg.org/css-syntax/#url-token-diagram) or `url( <string-token> )` function
/// A [`<url-token>`](https://drafts.csswg.org/css-syntax/#url-token-diagram)
///
/// The value does not include the `url(` `)` markers or the quotes.
/// The value does not include the `url(` `)` markers. Note that `url( <string-token> )` is represented by a
/// `Function` token.
UnquotedUrl(CowRcStr<'a>),
/// A `<delim-token>`
@@ -205,10 +206,12 @@ pub struct Tokenizer<'a> {
input: &'a str,
/// Counted in bytes, not code points. From 0.
position: usize,
/// The position at the start of the current line, but adjusted to
/// ensure that computing the column will give the result in units
/// of UTF-16 characters.
current_line_start_position: usize,
current_line_number: u32,
var_functions: SeenStatus,
viewport_percentages: SeenStatus,
source_map_url: Option<&'a str>,
}
@@ -234,7 +237,6 @@ impl<'a> Tokenizer<'a> {
current_line_start_position: 0,
current_line_number: first_line_number,
var_functions: SeenStatus::DontCare,
viewport_percentages: SeenStatus::DontCare,
source_map_url: None,
}
}
@@ -260,30 +262,6 @@ impl<'a> Tokenizer<'a> {
}
}
#[inline]
pub fn look_for_viewport_percentages(&mut self) {
self.viewport_percentages = SeenStatus::LookingForThem;
}
#[inline]
pub fn seen_viewport_percentages(&mut self) -> bool {
let seen = self.viewport_percentages == SeenStatus::SeenAtLeastOne;
self.viewport_percentages = SeenStatus::DontCare;
seen
}
#[inline]
pub fn see_dimension(&mut self, unit: &str) {
if self.viewport_percentages == SeenStatus::LookingForThem {
if unit.eq_ignore_ascii_case("vh") ||
unit.eq_ignore_ascii_case("vw") ||
unit.eq_ignore_ascii_case("vmin") ||
unit.eq_ignore_ascii_case("vmax") {
self.viewport_percentages = SeenStatus::SeenAtLeastOne;
}
}
}
#[inline]
pub fn next(&mut self) -> Result<Token<'a>, ()> {
next_token(self)
@@ -363,8 +341,23 @@ impl<'a> Tokenizer<'a> {
#[inline]
fn has_at_least(&self, n: usize) -> bool { self.position + n < self.input.len() }
// Advance over N bytes in the input. This function can advance
// over ASCII bytes (excluding newlines), or UTF-8 sequence
// leaders (excluding leaders for 4-byte sequences).
#[inline]
pub fn advance(&mut self, n: usize) { self.position += n }
pub fn advance(&mut self, n: usize) {
if cfg!(debug_assertions) {
// Each byte must either be an ASCII byte or a sequence
// leader, but not a 4-byte leader; also newlines are
// rejected.
for i in 0..n {
let b = self.byte_at(i);
debug_assert!(b.is_ascii() || (b & 0xF0 != 0xF0 && b & 0xC0 != 0x80));
debug_assert!(b != b'\r' && b != b'\n' && b != b'\x0C');
}
}
self.position += n
}
// Assumes non-EOF
#[inline]
@@ -375,10 +368,44 @@ impl<'a> Tokenizer<'a> {
self.input.as_bytes()[self.position + offset]
}
// Advance over a single byte; the byte must be a UTF-8 sequence
// leader for a 4-byte sequence.
#[inline]
fn consume_byte(&mut self) -> u8 {
fn consume_4byte_intro(&mut self) {
debug_assert!(self.next_byte_unchecked() & 0xF0 == 0xF0);
// This takes two UTF-16 characters to represent, so we
// actually have an undercount.
self.current_line_start_position = self.current_line_start_position.wrapping_sub(1);
self.position += 1;
self.input.as_bytes()[self.position - 1]
}
// Advance over a single byte; the byte must be a UTF-8
// continuation byte.
#[inline]
fn consume_continuation_byte(&mut self) {
debug_assert!(self.next_byte_unchecked() & 0xC0 == 0x80);
// Continuation bytes contribute to column overcount. Note
// that due to the special case for the 4-byte sequence intro,
// we must use wrapping add here.
self.current_line_start_position = self.current_line_start_position.wrapping_add(1);
self.position += 1;
}
// Advance over any kind of byte, excluding newlines.
#[inline(never)]
fn consume_known_byte(&mut self, byte: u8) {
debug_assert!(byte != b'\r' && byte != b'\n' && byte != b'\x0C');
self.position += 1;
// Continuation bytes contribute to column overcount.
if byte & 0xF0 == 0xF0 {
// This takes two UTF-16 characters to represent, so we
// actually have an undercount.
self.current_line_start_position = self.current_line_start_position.wrapping_sub(1);
} else if byte & 0xC0 == 0x80 {
// Note that due to the special case for the 4-byte
// sequence intro, we must use wrapping add here.
self.current_line_start_position = self.current_line_start_position.wrapping_add(1);
}
}
#[inline]
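Worked example of the bookkeeping above: the column is ultimately `position - current_line_start_position`. Consuming the 4-byte sequence for 🆒 advances `position` by 4 while `current_line_start_position` gains -1 for the intro byte and +1 for each of the three continuation bytes, a net +2, so the column grows by 4 - 2 = 2 UTF-16 units. A 2-byte sequence advances `position` by 2 and the line start by 1 (one continuation byte), yielding one column, and plain ASCII advances only `position`. The wrapping arithmetic exists because the -1 for an intro byte at the very start of the input can momentarily underflow the counter before the continuation bytes restore it.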
@@ -386,9 +413,15 @@ impl<'a> Tokenizer<'a> {
self.input[self.position..].chars().next().unwrap()
}
fn seen_newline(&mut self, is_cr: bool) {
if is_cr && self.next_byte() == Some(/* LF */ b'\n') {
return
// Given that a newline has been seen, advance over the newline
// and update the state.
#[inline]
fn consume_newline(&mut self) {
let byte = self.next_byte_unchecked();
debug_assert!(byte == b'\r' || byte == b'\n' || byte == b'\x0C');
self.position += 1;
if byte == b'\r' && self.next_byte() == Some(b'\n') {
self.position += 1;
}
self.current_line_start_position = self.position;
self.current_line_number += 1;
@@ -403,7 +436,11 @@ impl<'a> Tokenizer<'a> {
#[inline]
fn consume_char(&mut self) -> char {
let c = self.next_char();
self.position += c.len_utf8();
let len_utf8 = c.len_utf8();
self.position += len_utf8;
// Note that due to the special case for the 4-byte sequence
// intro, we must use wrapping add here.
self.current_line_start_position = self.current_line_start_position.wrapping_add(len_utf8 - c.len_utf16());
c
}
@@ -418,13 +455,8 @@ impl<'a> Tokenizer<'a> {
b' ' | b'\t' => {
self.advance(1)
},
b'\n' | b'\x0C' => {
self.advance(1);
self.seen_newline(false);
},
b'\r' => {
self.advance(1);
self.seen_newline(true);
b'\n' | b'\x0C' | b'\r' => {
self.consume_newline();
},
b'/' => {
if self.starts_with(b"/*") {
@@ -446,13 +478,8 @@ impl<'a> Tokenizer<'a> {
b' ' | b'\t' => {
self.advance(1)
},
b'\n' | b'\x0C' => {
self.advance(1);
self.seen_newline(false);
},
b'\r' => {
self.advance(1);
self.seen_newline(true);
b'\n' | b'\x0C' | b'\r' => {
self.consume_newline();
},
b'/' => {
if self.starts_with(b"/*") {
@@ -495,6 +522,7 @@ pub struct SourceLocation {
pub line: u32,
/// The column number within a line, starting at 0 for the first character of the line.
/// Column numbers are in units of UTF-16 characters.
pub column: u32,
}
@@ -506,22 +534,19 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
let b = tokenizer.next_byte_unchecked();
let token = match_byte! { b,
b' ' | b'\t' => {
consume_whitespace(tokenizer, false, false)
consume_whitespace(tokenizer, false)
},
b'\n' | b'\x0C' => {
consume_whitespace(tokenizer, true, false)
},
b'\r' => {
consume_whitespace(tokenizer, true, true)
b'\n' | b'\x0C' | b'\r' => {
consume_whitespace(tokenizer, true)
},
b'"' => { consume_string(tokenizer, false) },
b'#' => {
tokenizer.advance(1);
if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'-' | b'_' => true,
b'\\' => !tokenizer.has_newline_at(1),
_ => !b.is_ascii(),
// Any other valid case here already resulted in IDHash.
b'0'...b'9' | b'-' => true,
_ => false,
} { Hash(consume_name(tokenizer)) }
else { Delim('#') }
},
@@ -642,11 +667,12 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
}
fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool, is_cr: bool) -> Token<'a> {
fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool) -> Token<'a> {
let start_position = tokenizer.position();
tokenizer.advance(1);
if newline {
tokenizer.seen_newline(is_cr)
tokenizer.consume_newline();
} else {
tokenizer.advance(1);
}
while !tokenizer.is_eof() {
let b = tokenizer.next_byte_unchecked();
@@ -654,13 +680,8 @@ fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool, is_cr: b
b' ' | b'\t' => {
tokenizer.advance(1);
}
b'\n' | b'\x0C' => {
tokenizer.advance(1);
tokenizer.seen_newline(false);
}
b'\r' => {
tokenizer.advance(1);
tokenizer.seen_newline(true);
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
_ => {
break
@@ -700,15 +721,13 @@ fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
return contents
}
}
b'\n' | b'\x0C' => {
tokenizer.advance(1);
tokenizer.seen_newline(false);
}
b'\r' => {
tokenizer.advance(1);
tokenizer.seen_newline(true);
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
_ => {
// ASCII or other leading byte.
tokenizer.advance(1);
}
}
@@ -744,6 +763,7 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
tokenizer.advance(1);
return Ok(value.into())
}
tokenizer.advance(1);
}
b'\'' => {
if single_quote {
@@ -751,6 +771,7 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
tokenizer.advance(1);
return Ok(value.into())
}
tokenizer.advance(1);
}
b'\\' | b'\0' => {
// * The tokenizer's input is UTF-8 since it's `&str`.
@@ -764,49 +785,45 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
b'\n' | b'\r' | b'\x0C' => {
return Err(tokenizer.slice_from(start_pos).into())
},
_ => {}
b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
_ => {
// ASCII or other leading byte.
tokenizer.advance(1);
}
}
tokenizer.consume_byte();
}
while !tokenizer.is_eof() {
if matches!(tokenizer.next_byte_unchecked(), b'\n' | b'\r' | b'\x0C') {
return Err(
// string_bytes is well-formed UTF-8, see other comments.
unsafe {
from_utf8_release_unchecked(string_bytes)
}.into()
);
}
let b = tokenizer.consume_byte();
let b = tokenizer.next_byte_unchecked();
match_byte! { b,
b'\n' | b'\r' | b'\x0C' => {
return Err(
// string_bytes is well-formed UTF-8, see other comments.
unsafe {
from_utf8_release_unchecked(string_bytes)
}.into()
);
}
b'"' => {
tokenizer.advance(1);
if !single_quote {
break;
}
}
b'\'' => {
tokenizer.advance(1);
if single_quote {
break;
}
}
b'\\' => {
tokenizer.advance(1);
if !tokenizer.is_eof() {
match tokenizer.next_byte_unchecked() {
// Escaped newline
b'\n' | b'\x0C' => {
tokenizer.advance(1);
tokenizer.seen_newline(false);
}
b'\r' => {
tokenizer.advance(1);
if tokenizer.next_byte() == Some(b'\n') {
tokenizer.advance(1);
}
// `is_cr = true` is useful to skip \r when the next iteration
// of a loop will call `seen_newline` again for the following \n.
// In this case we're consuming both in this iteration, so passing `false`.
tokenizer.seen_newline(false);
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
// This pushes one well-formed code point
_ => consume_escape_and_write(tokenizer, &mut string_bytes)
@@ -816,10 +833,16 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
continue;
}
b'\0' => {
tokenizer.advance(1);
string_bytes.extend("\u{FFFD}".as_bytes());
continue;
}
_ => {},
b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
_ => {
// ASCII or other leading byte.
tokenizer.advance(1);
},
}
// If this byte is part of a multi-byte code point,
@@ -887,11 +910,11 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
break
}
b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
b => {
if b.is_ascii() {
return tokenizer.slice_from(start_pos).into();
}
tokenizer.advance(1);
return tokenizer.slice_from(start_pos).into();
}
}
}
@@ -913,15 +936,26 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
tokenizer.advance(1);
value_bytes.extend("\u{FFFD}".as_bytes());
},
_ => {
if b.is_ascii() {
break;
}
tokenizer.advance(1);
b'\x80'...b'\xBF' => {
// This byte *is* part of a multi-byte code point,
// we'll end up copying the whole code point before this loop does something else.
tokenizer.consume_continuation_byte();
value_bytes.push(b)
}
b'\xC0'...b'\xEF' => {
// This byte *is* part of a multi-byte code point,
// we'll end up copying the whole code point before this loop does something else.
tokenizer.advance(1);
value_bytes.push(b)
}
b'\xF0'...b'\xFF' => {
tokenizer.consume_4byte_intro();
value_bytes.push(b)
}
_ => {
// ASCII
break;
}
}
}
// string_bytes is well-formed UTF-8, see other comments.
@@ -1045,7 +1079,6 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
let value = value as f32;
if is_ident_start(tokenizer) {
let unit = consume_name(tokenizer);
tokenizer.see_dimension(&unit);
Dimension {
value: value,
int_value: int_value,
@@ -1101,11 +1134,15 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
}
b'"' | b'\'' => { return Err(()) }, // Do not advance
b')' => {
tokenizer.advance(offset + 1);
// Don't use advance, because we may be skipping
// newlines here, and we want to avoid the assert.
tokenizer.position += offset + 1;
break
}
_ => {
tokenizer.advance(offset);
// Don't use advance, because we may be skipping
// newlines here, and we want to avoid the assert.
tokenizer.position += offset;
found_printable_char = true;
break
}
@@ -1114,6 +1151,8 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
if newlines > 0 {
tokenizer.current_line_number += newlines;
// No need for wrapping_add here, because there's no possible
// way to wrap.
tokenizer.current_line_start_position = start_position + last_newline + 1;
}
@@ -1157,27 +1196,33 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
break
}
b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
_ => {
// ASCII or other leading byte.
tokenizer.advance(1);
}
}
}
while !tokenizer.is_eof() {
match_byte! { tokenizer.consume_byte(),
let b = tokenizer.next_byte_unchecked();
match_byte! { b,
b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {
// string_bytes is well-formed UTF-8, see other comments.
let string = unsafe { from_utf8_release_unchecked(string_bytes) }.into();
tokenizer.position -= 1;
return consume_url_end(tokenizer, start_pos, string)
}
b')' => {
tokenizer.advance(1);
break;
}
b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
| b'"' | b'\'' | b'(' => {
tokenizer.advance(1);
return consume_bad_url(tokenizer, start_pos);
}
b'\\' => {
tokenizer.advance(1);
if tokenizer.has_newline_at(0) {
return consume_bad_url(tokenizer, start_pos)
}
@@ -1186,11 +1231,28 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
consume_escape_and_write(tokenizer, &mut string_bytes)
},
b'\0' => {
tokenizer.advance(1);
string_bytes.extend("\u{FFFD}".as_bytes());
}
b'\x80'...b'\xBF' => {
// We'll end up copying the whole code point
// before this loop does something else.
tokenizer.consume_continuation_byte();
string_bytes.push(b);
}
b'\xF0'...b'\xFF' => {
// We'll end up copying the whole code point
// before this loop does something else.
tokenizer.consume_4byte_intro();
string_bytes.push(b);
}
// If this byte is part of a multi-byte code point,
// we'll end up copying the whole code point before this loop does something else.
b => { string_bytes.push(b) }
b => {
// ASCII or other leading byte.
tokenizer.advance(1);
string_bytes.push(b)
}
}
}
UnquotedUrl(
@@ -1204,18 +1266,17 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
string: CowRcStr<'a>)
-> Token<'a> {
while !tokenizer.is_eof() {
match_byte! { tokenizer.consume_byte(),
match_byte! { tokenizer.next_byte_unchecked(),
b')' => {
tokenizer.advance(1);
break
}
b' ' | b'\t' => {}
b'\n' | b'\x0C' => {
tokenizer.seen_newline(false);
b' ' | b'\t' => { tokenizer.advance(1); }
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
b'\r' => {
tokenizer.seen_newline(true);
}
_ => {
b => {
tokenizer.consume_known_byte(b);
return consume_bad_url(tokenizer, start_pos);
}
}
@@ -1226,22 +1287,23 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>, start_pos: SourcePosition) -> Token<'a> {
// Consume up to the closing )
while !tokenizer.is_eof() {
match_byte! { tokenizer.consume_byte(),
match_byte! { tokenizer.next_byte_unchecked(),
b')' => {
tokenizer.advance(1);
break
}
b'\\' => {
tokenizer.advance(1);
if matches!(tokenizer.next_byte(), Some(b')') | Some(b'\\')) {
tokenizer.advance(1); // Skip an escaped ')' or '\'
}
}
b'\n' | b'\x0C' => {
tokenizer.seen_newline(false);
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
b'\r' => {
tokenizer.seen_newline(true);
b => {
tokenizer.consume_known_byte(b);
}
_ => {},
}
}
BadUrl(tokenizer.slice_from(start_pos).into())
@@ -1285,16 +1347,8 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
b' ' | b'\t' => {
tokenizer.advance(1)
}
b'\n' | b'\x0C' => {
tokenizer.advance(1);
tokenizer.seen_newline(false)
}
b'\r' => {
tokenizer.advance(1);
if !tokenizer.is_eof() && tokenizer.next_byte_unchecked() == b'\n' {
tokenizer.advance(1);
}
tokenizer.seen_newline(false)
b'\n' | b'\x0C' | b'\r' => {
tokenizer.consume_newline();
}
_ => {}
}

toolkit/library/gtest/rust/Cargo.lock (generated, 13 changes)

@@ -296,11 +296,12 @@ dependencies = [
[[package]]
name = "cssparser"
version = "0.19.5"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
"procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -547,7 +548,7 @@ name = "geckoservo"
version = "0.0.1"
dependencies = [
"atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1109,7 +1110,7 @@ name = "selectors"
version = "0.19.0"
dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1211,7 +1212,7 @@ dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"hashglobe 0.1.0",
@@ -1260,7 +1261,7 @@ version = "0.0.1"
dependencies = [
"app_units 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
"selectors 0.19.0",
]
@@ -1577,7 +1578,7 @@ dependencies = [
"checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
"checksum core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f841e9637adec70838c537cae52cb4c751cc6514ad05669b51d107c2021c79"
"checksum core-text 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16ce16d9ed00181016c11ff48e561314bec92bfbce9fe48f319366618d4e5de6"
"checksum cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"
"checksum cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"
"checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
"checksum darling 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9861a8495606435477df581bc858ccf15a3469747edf175b94a4704fd9aaedac"
"checksum darling_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1486a8b00b45062c997f767738178b43219133dd0c8c826cb811e60563810821"

toolkit/library/rust/Cargo.lock (generated, 13 changes)

@@ -294,11 +294,12 @@ dependencies = [
[[package]]
name = "cssparser"
version = "0.19.5"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
"procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -545,7 +546,7 @@ name = "geckoservo"
version = "0.0.1"
dependencies = [
"atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1096,7 +1097,7 @@ name = "selectors"
version = "0.19.0"
dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1198,7 +1199,7 @@ dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"hashglobe 0.1.0",
@@ -1247,7 +1248,7 @@ version = "0.0.1"
dependencies = [
"app_units 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
"selectors 0.19.0",
]
@@ -1564,7 +1565,7 @@ dependencies = [
"checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
"checksum core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f841e9637adec70838c537cae52cb4c751cc6514ad05669b51d107c2021c79"
"checksum core-text 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16ce16d9ed00181016c11ff48e561314bec92bfbce9fe48f319366618d4e5de6"
"checksum cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"
"checksum cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"
"checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
"checksum darling 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9861a8495606435477df581bc858ccf15a3469747edf175b94a4704fd9aaedac"
"checksum darling_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1486a8b00b45062c997f767738178b43219133dd0c8c826cb811e60563810821"