Bug 1341102: Revendor rust dependencies to unbust tree. r=me

MozReview-Commit-ID: 6ChbWFfPHGS


--HG--
rename : third_party/rust/cssparser/.cargo-checksum.json => third_party/rust/cssparser-0.19.0/.cargo-checksum.json
rename : third_party/rust/cssparser/Cargo.toml => third_party/rust/cssparser-0.19.0/Cargo.toml
rename : third_party/rust/cssparser/src/parser.rs => third_party/rust/cssparser-0.19.0/src/parser.rs
rename : third_party/rust/cssparser/src/size_of_tests.rs => third_party/rust/cssparser-0.19.0/src/size_of_tests.rs
rename : third_party/rust/cssparser/src/tests.rs => third_party/rust/cssparser-0.19.0/src/tests.rs
rename : third_party/rust/cssparser/src/tokenizer.rs => third_party/rust/cssparser-0.19.0/src/tokenizer.rs
Emilio Cobos Álvarez 2017-08-17 20:51:43 +02:00
parent 23aba0f9de
commit 24cbad8b67
31 changed files with 6331 additions and 20 deletions

1 third_party/rust/cssparser-0.19.0/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"cd0faaf645b871741c6270f7ebff68b0aff08be5ed387728fa7d90e0d8403420","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"b847b80097015cb7d0f4be67c0d8b2f6b82006be865917ff14a96b484760d460","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"a3994f121fbff3dd9cf5b72e2f31d34fa14a26e451278faeff423697943fe5ed","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"e5cbc7df1f7d2e57b909ab9ebe5916096eb7f01a67a32a3155f92193d1c73fab","src/rules_and_declarations.rs":"f2cde5c4518a2349d24049f6195e31783a8af2815d84394d21f90c763fc257a1","src/serializer.rs":"c872921703dc029155a8019b51df0d23066b072c7e1f553422e448e66218fbdc","src/size_of_tests.rs":"544193a839daf4f9eb615a3657e0b95ee35c482e8de717f4899ad323b121240e","src/tests.rs":"0d07575505e3d125932ce4ff79f7864fd2ef7c81714e71c6f30a46c55adbc6dd","src/tokenizer.rs":"3855802ca8a2236c463c76208a115ddb8dbf2087de2f7a711ef2aef81d83c508","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"f3a5464ebae36626f28254b60d1abbba951417383192bcea65578b40fbec1a47"}

0 third_party/rust/cssparser-0.19.0/.cargo-ok vendored Normal file

17 third_party/rust/cssparser-0.19.0/.travis.yml vendored Normal file

@@ -0,0 +1,17 @@
language: rust
rust:
- nightly
- beta
- stable
script:
- cargo build --verbose
- cargo test --verbose
- cargo doc --verbose
- cargo test --features heapsize
- cargo test --features dummy_match_byte
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo test --features bench; fi
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo test --features "bench dummy_match_byte"; fi
notifications:
webhooks: http://build.servo.org:54856/travis

60 third_party/rust/cssparser-0.19.0/Cargo.toml vendored Normal file

@@ -0,0 +1,60 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "cssparser"
version = "0.19.0"
authors = ["Simon Sapin <simon.sapin@exyr.org>"]
build = "build.rs"
exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"]
description = "Rust implementation of CSS Syntax Level 3"
documentation = "https://docs.rs/cssparser/"
readme = "README.md"
keywords = ["css", "syntax", "parser"]
license = "MPL-2.0"
repository = "https://github.com/servo/rust-cssparser"
[dependencies.heapsize]
version = ">= 0.3, < 0.5"
optional = true
[dependencies.procedural-masquerade]
version = "0.1"
[dependencies.cssparser-macros]
version = "0.3"
[dependencies.phf]
version = "0.7"
[dependencies.serde]
version = "1.0"
optional = true
[dependencies.matches]
version = "0.1"
[dev-dependencies.difference]
version = "1.0"
[dev-dependencies.encoding_rs]
version = "0.5"
[dev-dependencies.rustc-serialize]
version = "0.3"
[build-dependencies.syn]
version = "0.11"
[build-dependencies.quote]
version = "0.3"
[features]
bench = []
dummy_match_byte = []

373 third_party/rust/cssparser-0.19.0/LICENSE vendored Normal file

@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

57 third_party/rust/cssparser-0.19.0/README.md vendored Normal file

@@ -0,0 +1,57 @@
rust-cssparser
==============
[![Build Status](https://travis-ci.org/servo/rust-cssparser.svg?branch=travis)](https://travis-ci.org/servo/rust-cssparser)
[Documentation](https://docs.rs/cssparser/)
Rust implementation of
[CSS Syntax Module Level 3](https://drafts.csswg.org/css-syntax/)
Overview
--------
Parsing CSS involves a series of steps:
* When parsing from bytes
(e.g. reading a file or fetching a URL from the network),
detect the character encoding
(based on a `Content-Type` HTTP header, an `@charset` rule, a BOM, etc.)
and decode to Unicode text.
rust-cssparser does not do this yet and just assumes UTF-8.
This step is skipped when parsing from Unicode, e.g. in an HTML `<style>` element.
* Tokenization, a.k.a. lexing.
The input, a stream of Unicode text, is transformed into a stream of *tokens*.
Tokenization never fails, although the output may contain *error tokens*.
* This flat stream of tokens is then transformed into a tree of *component values*,
which are either *preserved tokens*,
or blocks/functions (`{ … }`, `[ … ]`, `( … )`, `foo( … )`)
that contain more component values.
rust-cssparser does this at the same time as tokenization:
raw tokens are never materialized; you only get component values.
* Component values can then be parsed into generic rules or declarations.
The header and body of rules as well as the value of declarations
are still just lists of component values at this point.
See [the `ast` module](src/ast.rs) for the data structures.
* The last step of a full CSS parser is
parsing the remaining component values
into [Selectors](https://drafts.csswg.org/selectors/),
specific CSS properties, etc.
By design, rust-cssparser does not do this last step
which depends a lot on what you want to do:
which properties you want to support, what you want to do with selectors, etc.
It does, however, provide some helper functions to parse [CSS colors](src/color.rs)
and [An+B](src/nth.rs) (the argument to `:nth-child()` and related selectors).
See [Servo's `style` crate](https://github.com/mozilla/servo/tree/master/components/style)
for an example of a parser based on rust-cssparser.
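A minimal usage sketch (assuming the 0.19 API as exported from `src/lib.rs` in this vendored copy):

```rust
extern crate cssparser;

use cssparser::{Parser, ParserInput};

fn main() {
    let mut input = ParserInput::new("color: rgb(255, 0, 0)");
    let mut parser = Parser::new(&mut input);
    // `next()` skips whitespace and comments and yields component values;
    // the contents of `rgb( … )` would be read with `parse_nested_block`.
    loop {
        let token = match parser.next() {
            Ok(token) => token.clone(),
            Err(_) => break, // end of input
        };
        println!("{:?}", token);
    }
}
```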

40 third_party/rust/cssparser-0.19.0/build.rs vendored Normal file

@@ -0,0 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate quote;
extern crate syn;
use std::env;
use std::path::Path;
#[cfg(feature = "dummy_match_byte")]
mod codegen {
use std::path::Path;
pub fn main(_: &Path) {}
}
#[cfg(not(feature = "dummy_match_byte"))]
#[path = "build/match_byte.rs"]
mod match_byte;
#[cfg(not(feature = "dummy_match_byte"))]
mod codegen {
use match_byte;
use std::env;
use std::path::Path;
pub fn main(tokenizer_rs: &Path) {
match_byte::expand(tokenizer_rs,
&Path::new(&env::var("OUT_DIR").unwrap()).join("tokenizer.rs"));
}
}
fn main() {
let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
let tokenizer_rs = Path::new(&manifest_dir).join("src/tokenizer.rs");
codegen::main(&tokenizer_rs);
println!("cargo:rerun-if-changed={}", tokenizer_rs.display());
}

271 third_party/rust/cssparser-0.19.0/build/match_byte.rs vendored Normal file

@@ -0,0 +1,271 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use quote::{ToTokens, Tokens};
use std::fs::File;
use std::io::{Read, Write};
use std::path::Path;
use std::vec;
use std::iter;
use syn;
pub fn expand(from: &Path, to: &Path) {
let mut source = String::new();
File::open(from).unwrap().read_to_string(&mut source).unwrap();
let tts = syn::parse_token_trees(&source).expect("Parsing rules.rs module");
let mut tokens = Tokens::new();
tokens.append_all(expand_tts(tts));
let code = tokens.to_string().replace("{ ", "{\n").replace(" }", "\n}");
File::create(to).unwrap().write_all(code.as_bytes()).unwrap();
}
fn expand_tts(tts: Vec<syn::TokenTree>) -> Vec<syn::TokenTree> {
use syn::*;
let mut expanded = Vec::new();
let mut tts = tts.into_iter();
while let Some(tt) = tts.next() {
match tt {
TokenTree::Token(Token::Ident(ident)) => {
if ident != "match_byte" {
expanded.push(TokenTree::Token(Token::Ident(ident)));
continue;
}
match tts.next() {
Some(TokenTree::Token(Token::Not)) => {},
other => {
expanded.push(TokenTree::Token(Token::Ident(ident)));
if let Some(other) = other {
expanded.push(other);
}
continue;
}
}
let tts = match tts.next() {
Some(TokenTree::Delimited(Delimited { tts, .. })) => tts,
other => {
expanded.push(TokenTree::Token(Token::Ident(ident)));
expanded.push(TokenTree::Token(Token::Not));
if let Some(other) = other {
expanded.push(other);
}
continue;
}
};
let (to_be_matched, table, cases, wildcard_binding) = parse_match_bytes_macro(tts);
let expr = expand_match_bytes_macro(to_be_matched,
&table,
cases,
wildcard_binding);
let tts = syn::parse_token_trees(&expr)
.expect("parsing macro expansion as token trees");
expanded.extend(expand_tts(tts));
}
TokenTree::Delimited(Delimited { delim, tts }) => {
expanded.push(TokenTree::Delimited(Delimited {
delim: delim,
tts: expand_tts(tts),
}))
}
other => expanded.push(other),
}
}
expanded
}
/// Parses a token tree corresponding to the `match_byte` macro.
///
/// ## Example
///
/// ```rust
/// match_byte! { tokenizer.next_byte_unchecked(),
/// b'a'..b'z' => { ... }
/// b'0'..b'9' => { ... }
/// b'\n' | b'\\' => { ... }
/// foo => { ... }
/// }
/// ```
///
/// Returns:
/// * The token tree that contains the expression to be matched (in this case
/// `tokenizer.next_byte_unchecked()`).
///
/// * The table with the different cases per byte, each entry in the table
/// contains a non-zero integer representing a different arm of the
/// match expression.
///
/// * The list of cases containing the expansion of the arms of the match
/// expression.
///
/// * An optional identifier to which the wildcard pattern is matched (`foo` in
/// this case).
///
fn parse_match_bytes_macro(tts: Vec<syn::TokenTree>) -> (Vec<syn::TokenTree>, [u8; 256], Vec<Case>, Option<syn::Ident>) {
let mut tts = tts.into_iter();
// Grab the thing we're matching, until we find a comma.
let mut left_hand_side = vec![];
loop {
match tts.next() {
Some(syn::TokenTree::Token(syn::Token::Comma)) => break,
Some(other) => left_hand_side.push(other),
None => panic!("Expected not to run out of tokens looking for a comma"),
}
}
let mut cases = vec![];
let mut table = [0u8; 256];
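// Each table slot holds the 1-based index of the match arm that claims
// that byte; 0 marks bytes not yet claimed by an earlier pattern.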
let mut tts = tts.peekable();
let mut case_id: u8 = 1;
let mut binding = None;
while tts.len() > 0 {
cases.push(parse_case(&mut tts, &mut table, &mut binding, case_id));
// Allow an optional comma between cases.
match tts.peek() {
Some(&syn::TokenTree::Token(syn::Token::Comma)) => {
tts.next();
},
_ => {},
}
case_id += 1;
}
(left_hand_side, table, cases, binding)
}
#[derive(Debug)]
struct Case(Vec<syn::TokenTree>);
/// Parses a single pattern => expression, and returns the case, filling in the
/// table with the case id for every byte that matched.
///
/// The `binding` parameter is the identifier that is used by the wildcard
/// pattern.
fn parse_case(tts: &mut iter::Peekable<vec::IntoIter<syn::TokenTree>>,
table: &mut [u8; 256],
binding: &mut Option<syn::Ident>,
case_id: u8)
-> Case {
// The last byte checked, as part of this pattern, to properly detect
// ranges.
let mut last_byte: Option<u8> = None;
// Loop through the pattern filling with bytes the table.
loop {
match tts.next() {
Some(syn::TokenTree::Token(syn::Token::Literal(syn::Lit::Byte(byte)))) => {
table[byte as usize] = case_id;
last_byte = Some(byte);
}
Some(syn::TokenTree::Token(syn::Token::BinOp(syn::BinOpToken::Or))) => {
last_byte = None; // This pattern is over.
},
Some(syn::TokenTree::Token(syn::Token::DotDotDot)) => {
assert!(last_byte.is_some(), "Expected closed range!");
match tts.next() {
Some(syn::TokenTree::Token(syn::Token::Literal(syn::Lit::Byte(byte)))) => {
for b in last_byte.take().unwrap()..byte {
if table[b as usize] == 0 {
table[b as usize] = case_id;
}
}
if table[byte as usize] == 0 {
table[byte as usize] = case_id;
}
}
other => panic!("Expected closed range, got: {:?}", other),
}
},
Some(syn::TokenTree::Token(syn::Token::FatArrow)) => break,
Some(syn::TokenTree::Token(syn::Token::Ident(ident))) => {
assert_eq!(last_byte, None, "I don't support ranges with identifiers!");
assert_eq!(*binding, None);
for mut byte in table.iter_mut() {
if *byte == 0 {
*byte = case_id;
}
}
*binding = Some(ident)
}
Some(syn::TokenTree::Token(syn::Token::Underscore)) => {
assert_eq!(last_byte, None);
for mut byte in table.iter_mut() {
if *byte == 0 {
*byte = case_id;
}
}
},
other => panic!("Expected literal byte, got: {:?}", other),
}
}
match tts.next() {
Some(syn::TokenTree::Delimited(syn::Delimited { delim: syn::DelimToken::Brace, tts })) => {
Case(tts)
}
other => panic!("Expected case with braces after fat arrow, got: {:?}", other),
}
}
fn expand_match_bytes_macro(to_be_matched: Vec<syn::TokenTree>,
table: &[u8; 256],
cases: Vec<Case>,
binding: Option<syn::Ident>)
-> String {
use std::fmt::Write;
assert!(!to_be_matched.is_empty());
assert!(table.iter().all(|b| *b != 0), "Incomplete pattern? Bogus code!");
// We build the expression with text since it's easier.
let mut expr = "{\n".to_owned();
expr.push_str("enum Case {\n");
for (i, _) in cases.iter().enumerate() {
write!(&mut expr, "Case{} = {},", i + 1, i + 1).unwrap();
}
expr.push_str("}\n"); // enum Case
expr.push_str("static __CASES: [Case; 256] = [");
for byte in table.iter() {
write!(&mut expr, "Case::Case{}, ", *byte).unwrap();
}
expr.push_str("];\n");
let mut tokens = Tokens::new();
let to_be_matched = syn::Delimited {
delim: if binding.is_some() { syn::DelimToken::Brace } else { syn::DelimToken::Paren },
tts: to_be_matched
};
to_be_matched.to_tokens(&mut tokens);
if let Some(ref binding) = binding {
write!(&mut expr, "let {} = {};\n", binding.to_string(), tokens.as_str()).unwrap();
}
write!(&mut expr, "match __CASES[{} as usize] {{", match binding {
Some(binding) => binding.to_string(),
None => tokens.to_string(),
}).unwrap();
for (i, case) in cases.into_iter().enumerate() {
let mut case_tokens = Tokens::new();
let case = syn::Delimited {
delim: syn::DelimToken::Brace,
tts: case.0
};
case.to_tokens(&mut case_tokens);
write!(&mut expr, "Case::Case{} => {},\n", i + 1, case_tokens.as_str()).unwrap();
}
expr.push_str("}\n"); // match
expr.push_str("}\n"); // top
expr
}
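// Illustrative sketch, not part of the vendored file: for a hypothetical
// invocation
//     match_byte! { b,
//         b'0'...b'9' => { digit() },
//         _ => { other() }
//     }
// the generated code is roughly
//     {
//         enum Case { Case1 = 1, Case2 = 2 }
//         static __CASES: [Case; 256] = [/* Case1 for b'0'...b'9', else Case2 */];
//         match __CASES[b as usize] {
//             Case::Case1 => { digit() },
//             Case::Case2 => { other() },
//         }
//     }
// i.e. a 256-entry lookup table replaces chained byte-range comparisons.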

0 third_party/rust/cssparser-0.19.0/docs/.nojekyll vendored Normal file

3 third_party/rust/cssparser-0.19.0/docs/404.html vendored Normal file

@@ -0,0 +1,3 @@
<meta http-equiv="refresh" content="0; url=https://docs.rs/cssparser/">
<link rel="canonical" href="https://docs.rs/cssparser/">
<a href="https://docs.rs/cssparser/">Moved to docs.rs</a>

3 third_party/rust/cssparser-0.19.0/docs/index.html vendored Normal file

@@ -0,0 +1,3 @@
<meta http-equiv="refresh" content="0; url=https://docs.rs/cssparser/">
<link rel="canonical" href="https://docs.rs/cssparser/">
<a href="https://docs.rs/cssparser/">Moved to docs.rs</a>

553 third_party/rust/cssparser-0.19.0/src/color.rs vendored Normal file

@@ -0,0 +1,553 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::fmt;
use std::f32::consts::PI;
use super::{Token, Parser, ToCss, ParseError, BasicParseError};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// A color with red, green, blue, and alpha components, in a byte each.
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct RGBA {
/// The red component.
pub red: u8,
/// The green component.
pub green: u8,
/// The blue component.
pub blue: u8,
/// The alpha component.
pub alpha: u8,
}
impl RGBA {
/// Constructs a new RGBA value from float components. It expects the red,
/// green, blue and alpha channels in that order, and all values will be
/// clamped to the 0.0 ... 1.0 range.
#[inline]
pub fn from_floats(red: f32, green: f32, blue: f32, alpha: f32) -> Self {
Self::new(
clamp_unit_f32(red),
clamp_unit_f32(green),
clamp_unit_f32(blue),
clamp_unit_f32(alpha),
)
}
/// Returns a transparent color.
#[inline]
pub fn transparent() -> Self {
Self::new(0, 0, 0, 0)
}
/// Same thing, but with `u8` values instead of floats in the 0 to 1 range.
#[inline]
pub fn new(red: u8, green: u8, blue: u8, alpha: u8) -> Self {
RGBA { red: red, green: green, blue: blue, alpha: alpha }
}
/// Returns the red channel in a floating point number form, from 0 to 1.
#[inline]
pub fn red_f32(&self) -> f32 {
self.red as f32 / 255.0
}
/// Returns the green channel in a floating point number form, from 0 to 1.
#[inline]
pub fn green_f32(&self) -> f32 {
self.green as f32 / 255.0
}
/// Returns the blue channel in a floating point number form, from 0 to 1.
#[inline]
pub fn blue_f32(&self) -> f32 {
self.blue as f32 / 255.0
}
/// Returns the alpha channel in a floating point number form, from 0 to 1.
#[inline]
pub fn alpha_f32(&self) -> f32 {
self.alpha as f32 / 255.0
}
}
#[cfg(feature = "serde")]
impl Serialize for RGBA {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
(self.red, self.green, self.blue, self.alpha).serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for RGBA {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
let (r, g, b, a) = Deserialize::deserialize(deserializer)?;
Ok(RGBA::new(r, g, b, a))
}
}
#[cfg(feature = "heapsize")]
known_heap_size!(0, RGBA);
impl ToCss for RGBA {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
// Try first with two decimal places, then with three.
let mut rounded_alpha = (self.alpha_f32() * 100.).round() / 100.;
if clamp_unit_f32(rounded_alpha) != self.alpha {
rounded_alpha = (self.alpha_f32() * 1000.).round() / 1000.;
}
if self.alpha == 255 {
write!(dest, "rgb({}, {}, {})", self.red, self.green, self.blue)
} else {
write!(dest, "rgba({}, {}, {}, {})",
self.red, self.green, self.blue, rounded_alpha)
}
}
}
/// A <color> value.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Color {
/// The 'currentcolor' keyword
CurrentColor,
/// Everything else gets converted to RGBA during parsing
RGBA(RGBA),
}
#[cfg(feature = "heapsize")]
known_heap_size!(0, Color);
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Color::CurrentColor => dest.write_str("currentcolor"),
Color::RGBA(ref rgba) => rgba.to_css(dest),
}
}
}
impl Color {
/// Parse a <color> value, per CSS Color Module Level 3.
///
/// FIXME(#2) Deprecated CSS2 System Colors are not supported yet.
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, BasicParseError<'i>> {
// FIXME: remove clone() when lifetimes are non-lexical
let token = input.next()?.clone();
match token {
Token::Hash(ref value) | Token::IDHash(ref value) => {
Color::parse_hash(value.as_bytes())
},
Token::Ident(ref value) => parse_color_keyword(&*value),
Token::Function(ref name) => {
return input.parse_nested_block(|arguments| {
parse_color_function(&*name, arguments)
.map_err(|e| ParseError::Basic(e))
}).map_err(ParseError::<()>::basic);
}
_ => Err(())
}.map_err(|()| BasicParseError::UnexpectedToken(token))
}
/// Parse a color hash, without the leading '#' character.
#[inline]
pub fn parse_hash(value: &[u8]) -> Result<Self, ()> {
match value.len() {
8 => Ok(rgba(
from_hex(value[0])? * 16 + from_hex(value[1])?,
from_hex(value[2])? * 16 + from_hex(value[3])?,
from_hex(value[4])? * 16 + from_hex(value[5])?,
from_hex(value[6])? * 16 + from_hex(value[7])?),
),
6 => Ok(rgb(
from_hex(value[0])? * 16 + from_hex(value[1])?,
from_hex(value[2])? * 16 + from_hex(value[3])?,
from_hex(value[4])? * 16 + from_hex(value[5])?),
),
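// For the shorthand forms below, each hex digit is doubled:
// multiplying by 17 (0x11) turns 0xF into 0xFF.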
4 => Ok(rgba(
from_hex(value[0])? * 17,
from_hex(value[1])? * 17,
from_hex(value[2])? * 17,
from_hex(value[3])? * 17),
),
3 => Ok(rgb(
from_hex(value[0])? * 17,
from_hex(value[1])? * 17,
from_hex(value[2])? * 17),
),
_ => Err(())
}
}
}
#[inline]
fn rgb(red: u8, green: u8, blue: u8) -> Color {
rgba(red, green, blue, 255)
}
#[inline]
fn rgba(red: u8, green: u8, blue: u8, alpha: u8) -> Color {
Color::RGBA(RGBA::new(red, green, blue, alpha))
}
/// Return the named color with the given name.
///
/// Matching is case-insensitive in the ASCII range.
/// CSS escaping (if relevant) should be resolved before calling this function.
/// (For example, the value of an `Ident` token is fine.)
#[inline]
pub fn parse_color_keyword(ident: &str) -> Result<Color, ()> {
macro_rules! rgb {
($red: expr, $green: expr, $blue: expr) => {
Color::RGBA(RGBA {
red: $red,
green: $green,
blue: $blue,
alpha: 255,
})
}
}
ascii_case_insensitive_phf_map! {
keyword -> Color = {
"black" => rgb!(0, 0, 0),
"silver" => rgb!(192, 192, 192),
"gray" => rgb!(128, 128, 128),
"white" => rgb!(255, 255, 255),
"maroon" => rgb!(128, 0, 0),
"red" => rgb!(255, 0, 0),
"purple" => rgb!(128, 0, 128),
"fuchsia" => rgb!(255, 0, 255),
"green" => rgb!(0, 128, 0),
"lime" => rgb!(0, 255, 0),
"olive" => rgb!(128, 128, 0),
"yellow" => rgb!(255, 255, 0),
"navy" => rgb!(0, 0, 128),
"blue" => rgb!(0, 0, 255),
"teal" => rgb!(0, 128, 128),
"aqua" => rgb!(0, 255, 255),
"aliceblue" => rgb!(240, 248, 255),
"antiquewhite" => rgb!(250, 235, 215),
"aquamarine" => rgb!(127, 255, 212),
"azure" => rgb!(240, 255, 255),
"beige" => rgb!(245, 245, 220),
"bisque" => rgb!(255, 228, 196),
"blanchedalmond" => rgb!(255, 235, 205),
"blueviolet" => rgb!(138, 43, 226),
"brown" => rgb!(165, 42, 42),
"burlywood" => rgb!(222, 184, 135),
"cadetblue" => rgb!(95, 158, 160),
"chartreuse" => rgb!(127, 255, 0),
"chocolate" => rgb!(210, 105, 30),
"coral" => rgb!(255, 127, 80),
"cornflowerblue" => rgb!(100, 149, 237),
"cornsilk" => rgb!(255, 248, 220),
"crimson" => rgb!(220, 20, 60),
"cyan" => rgb!(0, 255, 255),
"darkblue" => rgb!(0, 0, 139),
"darkcyan" => rgb!(0, 139, 139),
"darkgoldenrod" => rgb!(184, 134, 11),
"darkgray" => rgb!(169, 169, 169),
"darkgreen" => rgb!(0, 100, 0),
"darkgrey" => rgb!(169, 169, 169),
"darkkhaki" => rgb!(189, 183, 107),
"darkmagenta" => rgb!(139, 0, 139),
"darkolivegreen" => rgb!(85, 107, 47),
"darkorange" => rgb!(255, 140, 0),
"darkorchid" => rgb!(153, 50, 204),
"darkred" => rgb!(139, 0, 0),
"darksalmon" => rgb!(233, 150, 122),
"darkseagreen" => rgb!(143, 188, 143),
"darkslateblue" => rgb!(72, 61, 139),
"darkslategray" => rgb!(47, 79, 79),
"darkslategrey" => rgb!(47, 79, 79),
"darkturquoise" => rgb!(0, 206, 209),
"darkviolet" => rgb!(148, 0, 211),
"deeppink" => rgb!(255, 20, 147),
"deepskyblue" => rgb!(0, 191, 255),
"dimgray" => rgb!(105, 105, 105),
"dimgrey" => rgb!(105, 105, 105),
"dodgerblue" => rgb!(30, 144, 255),
"firebrick" => rgb!(178, 34, 34),
"floralwhite" => rgb!(255, 250, 240),
"forestgreen" => rgb!(34, 139, 34),
"gainsboro" => rgb!(220, 220, 220),
"ghostwhite" => rgb!(248, 248, 255),
"gold" => rgb!(255, 215, 0),
"goldenrod" => rgb!(218, 165, 32),
"greenyellow" => rgb!(173, 255, 47),
"grey" => rgb!(128, 128, 128),
"honeydew" => rgb!(240, 255, 240),
"hotpink" => rgb!(255, 105, 180),
"indianred" => rgb!(205, 92, 92),
"indigo" => rgb!(75, 0, 130),
"ivory" => rgb!(255, 255, 240),
"khaki" => rgb!(240, 230, 140),
"lavender" => rgb!(230, 230, 250),
"lavenderblush" => rgb!(255, 240, 245),
"lawngreen" => rgb!(124, 252, 0),
"lemonchiffon" => rgb!(255, 250, 205),
"lightblue" => rgb!(173, 216, 230),
"lightcoral" => rgb!(240, 128, 128),
"lightcyan" => rgb!(224, 255, 255),
"lightgoldenrodyellow" => rgb!(250, 250, 210),
"lightgray" => rgb!(211, 211, 211),
"lightgreen" => rgb!(144, 238, 144),
"lightgrey" => rgb!(211, 211, 211),
"lightpink" => rgb!(255, 182, 193),
"lightsalmon" => rgb!(255, 160, 122),
"lightseagreen" => rgb!(32, 178, 170),
"lightskyblue" => rgb!(135, 206, 250),
"lightslategray" => rgb!(119, 136, 153),
"lightslategrey" => rgb!(119, 136, 153),
"lightsteelblue" => rgb!(176, 196, 222),
"lightyellow" => rgb!(255, 255, 224),
"limegreen" => rgb!(50, 205, 50),
"linen" => rgb!(250, 240, 230),
"magenta" => rgb!(255, 0, 255),
"mediumaquamarine" => rgb!(102, 205, 170),
"mediumblue" => rgb!(0, 0, 205),
"mediumorchid" => rgb!(186, 85, 211),
"mediumpurple" => rgb!(147, 112, 219),
"mediumseagreen" => rgb!(60, 179, 113),
"mediumslateblue" => rgb!(123, 104, 238),
"mediumspringgreen" => rgb!(0, 250, 154),
"mediumturquoise" => rgb!(72, 209, 204),
"mediumvioletred" => rgb!(199, 21, 133),
"midnightblue" => rgb!(25, 25, 112),
"mintcream" => rgb!(245, 255, 250),
"mistyrose" => rgb!(255, 228, 225),
"moccasin" => rgb!(255, 228, 181),
"navajowhite" => rgb!(255, 222, 173),
"oldlace" => rgb!(253, 245, 230),
"olivedrab" => rgb!(107, 142, 35),
"orange" => rgb!(255, 165, 0),
"orangered" => rgb!(255, 69, 0),
"orchid" => rgb!(218, 112, 214),
"palegoldenrod" => rgb!(238, 232, 170),
"palegreen" => rgb!(152, 251, 152),
"paleturquoise" => rgb!(175, 238, 238),
"palevioletred" => rgb!(219, 112, 147),
"papayawhip" => rgb!(255, 239, 213),
"peachpuff" => rgb!(255, 218, 185),
"peru" => rgb!(205, 133, 63),
"pink" => rgb!(255, 192, 203),
"plum" => rgb!(221, 160, 221),
"powderblue" => rgb!(176, 224, 230),
"rebeccapurple" => rgb!(102, 51, 153),
"rosybrown" => rgb!(188, 143, 143),
"royalblue" => rgb!(65, 105, 225),
"saddlebrown" => rgb!(139, 69, 19),
"salmon" => rgb!(250, 128, 114),
"sandybrown" => rgb!(244, 164, 96),
"seagreen" => rgb!(46, 139, 87),
"seashell" => rgb!(255, 245, 238),
"sienna" => rgb!(160, 82, 45),
"skyblue" => rgb!(135, 206, 235),
"slateblue" => rgb!(106, 90, 205),
"slategray" => rgb!(112, 128, 144),
"slategrey" => rgb!(112, 128, 144),
"snow" => rgb!(255, 250, 250),
"springgreen" => rgb!(0, 255, 127),
"steelblue" => rgb!(70, 130, 180),
"tan" => rgb!(210, 180, 140),
"thistle" => rgb!(216, 191, 216),
"tomato" => rgb!(255, 99, 71),
"turquoise" => rgb!(64, 224, 208),
"violet" => rgb!(238, 130, 238),
"wheat" => rgb!(245, 222, 179),
"whitesmoke" => rgb!(245, 245, 245),
"yellowgreen" => rgb!(154, 205, 50),
"transparent" => Color::RGBA(RGBA { red: 0, green: 0, blue: 0, alpha: 0 }),
"currentcolor" => Color::CurrentColor,
}
}
keyword(ident).cloned().ok_or(())
}
#[inline]
fn from_hex(c: u8) -> Result<u8, ()> {
match c {
b'0' ... b'9' => Ok(c - b'0'),
b'a' ... b'f' => Ok(c - b'a' + 10),
b'A' ... b'F' => Ok(c - b'A' + 10),
_ => Err(())
}
}
fn clamp_unit_f32(val: f32) -> u8 {
// Whilst scaling by 256 and flooring would provide
// an equal distribution of integers to percentage inputs,
// this is not what Gecko does so we instead multiply by 255
// and round (adding 0.5 and flooring is equivalent to rounding)
//
// Chrome does something similar for the alpha value, but not
// the rgb values.
//
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1340484
//
// Clamping to 256 and rounding after would let 1.0 map to 256, and
// `256.0_f32 as u8` is undefined behavior:
//
// https://github.com/rust-lang/rust/issues/10184
clamp_floor_256_f32(val * 255.)
}
fn clamp_floor_256_f32(val: f32) -> u8 {
val.round().max(0.).min(255.) as u8
}
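// Illustrative check, not part of the vendored file: the round-based
// clamping above maps 0.5 to 128 (0.5 * 255 = 127.5, which rounds up) and
// saturates at both ends instead of reaching the undefined
// `256.0_f32 as u8` cast mentioned in the comment.
#[cfg(test)]
mod clamp_sketch {
    #[test]
    fn clamp_examples() {
        assert_eq!(super::clamp_unit_f32(0.5), 128);
        assert_eq!(super::clamp_unit_f32(1.0), 255);
        assert_eq!(super::clamp_unit_f32(2.0), 255);
        assert_eq!(super::clamp_unit_f32(-1.0), 0);
    }
}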
#[inline]
fn parse_color_function<'i, 't>(name: &str, arguments: &mut Parser<'i, 't>) -> Result<Color, BasicParseError<'i>> {
let (red, green, blue, uses_commas) = match_ignore_ascii_case! { name,
"rgb" | "rgba" => parse_rgb_components_rgb(arguments)?,
"hsl" | "hsla" => parse_rgb_components_hsl(arguments)?,
_ => return Err(BasicParseError::UnexpectedToken(Token::Ident(name.to_owned().into()))),
};
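// Legacy rgb()/rgba() syntax separates arguments with commas and passes
// alpha as a fourth argument; the modern space-separated syntax puts alpha
// after a `/`. `uses_commas` keeps the alpha separator consistent with the
// color components already parsed.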
let alpha = if !arguments.is_exhausted() {
if uses_commas {
arguments.expect_comma()?;
} else {
arguments.expect_delim('/')?;
};
match *arguments.next()? {
Token::Number { value: v, .. } => {
clamp_unit_f32(v)
}
Token::Percentage { unit_value: v, .. } => {
clamp_unit_f32(v)
}
ref t => {
return Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
} else {
255
};
arguments.expect_exhausted()?;
Ok(rgba(red, green, blue, alpha))
}
#[inline]
fn parse_rgb_components_rgb<'i, 't>(arguments: &mut Parser<'i, 't>) -> Result<(u8, u8, u8, bool), BasicParseError<'i>> {
let red: u8;
let green: u8;
let blue: u8;
let mut uses_commas = false;
// Either integers or percentages, but all the same type.
// https://drafts.csswg.org/css-color/#rgb-functions
match arguments.next()?.clone() {
Token::Number { value: v, .. } => {
red = clamp_floor_256_f32(v);
green = clamp_floor_256_f32(match arguments.next()?.clone() {
Token::Number { value: v, .. } => v,
Token::Comma => {
uses_commas = true;
arguments.expect_number()?
}
t => return Err(BasicParseError::UnexpectedToken(t))
});
if uses_commas {
arguments.expect_comma()?;
}
blue = clamp_floor_256_f32(arguments.expect_number()?);
}
Token::Percentage { unit_value, .. } => {
red = clamp_unit_f32(unit_value);
green = clamp_unit_f32(match arguments.next()?.clone() {
Token::Percentage { unit_value, .. } => unit_value,
Token::Comma => {
uses_commas = true;
arguments.expect_percentage()?
}
t => return Err(BasicParseError::UnexpectedToken(t))
});
if uses_commas {
arguments.expect_comma()?;
}
blue = clamp_unit_f32(arguments.expect_percentage()?);
}
t => return Err(BasicParseError::UnexpectedToken(t))
};
return Ok((red, green, blue, uses_commas));
}
#[inline]
fn parse_rgb_components_hsl<'i, 't>(arguments: &mut Parser<'i, 't>) -> Result<(u8, u8, u8, bool), BasicParseError<'i>> {
let mut uses_commas = false;
// Hue given as an angle
// https://drafts.csswg.org/css-values/#angles
let hue_degrees = match *arguments.next()? {
Token::Number { value: v, .. } => v,
Token::Dimension { value: v, ref unit, .. } => {
match_ignore_ascii_case! { &*unit,
"deg" => v,
"grad" => v * 360. / 400.,
"rad" => v * 360. / (2. * PI),
"turn" => v * 360.,
_ => return Err(BasicParseError::UnexpectedToken(Token::Ident(unit.clone()))),
}
}
ref t => return Err(BasicParseError::UnexpectedToken(t.clone()))
};
// Subtract an integer before rounding, to avoid some rounding errors:
let hue_normalized_degrees = hue_degrees - 360. * (hue_degrees / 360.).floor();
let hue = hue_normalized_degrees / 360.;
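// e.g. hue_degrees = -90.: -90. - 360. * (-1.) = 270., so hue = 0.75.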
// Saturation and lightness are clamped to 0% ... 100%
// https://drafts.csswg.org/css-color/#the-hsl-notation
let saturation = match arguments.next()?.clone() {
Token::Percentage { unit_value, .. } => unit_value,
Token::Comma => {
uses_commas = true;
arguments.expect_percentage()?
}
t => return Err(BasicParseError::UnexpectedToken(t))
};
let saturation = saturation.max(0.).min(1.);
if uses_commas {
arguments.expect_comma()?;
}
let lightness = arguments.expect_percentage()?;
let lightness = lightness.max(0.).min(1.);
// https://drafts.csswg.org/css-color/#hsl-color
// except with h pre-multiplied by 3, to avoid some rounding errors.
fn hue_to_rgb(m1: f32, m2: f32, mut h3: f32) -> f32 {
if h3 < 0. { h3 += 3. }
if h3 > 3. { h3 -= 3. }
if h3 * 2. < 1. { m1 + (m2 - m1) * h3 * 2. }
else if h3 * 2. < 3. { m2 }
else if h3 < 2. { m1 + (m2 - m1) * (2. - h3) * 2. }
else { m1 }
}
let m2 = if lightness <= 0.5 { lightness * (saturation + 1.) }
else { lightness + saturation - lightness * saturation };
let m1 = lightness * 2. - m2;
let hue_times_3 = hue * 3.;
let red = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3 + 1.));
let green = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3));
let blue = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3 - 1.));
return Ok((red, green, blue, uses_commas));
}

207 third_party/rust/cssparser-0.19.0/src/cow_rc_str.rs vendored Normal file

@@ -0,0 +1,207 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::borrow::{Borrow, Cow};
use std::cmp;
use std::fmt;
use std::hash;
use std::marker::PhantomData;
use std::mem;
use std::ops::Deref;
use std::rc::Rc;
use std::slice;
use std::str;
use std::usize;
/// A string that is either shared (heap-allocated and reference-counted) or borrowed.
///
/// Equivalent to `enum { Borrowed(&'a str), Shared(Rc<String>) }`, but stored more compactly.
///
/// FIXME(https://github.com/rust-lang/rfcs/issues/1230): use an actual enum if/when
/// the compiler can do this layout optimization.
pub struct CowRcStr<'a> {
/// FIXME: https://github.com/rust-lang/rust/issues/27730 use NonZero or Shared.
/// In the meantime we abuse `&'static _` to get the effect of `NonZero<*const _>`.
/// `ptr` doesn't really have the 'static lifetime!
ptr: &'static (),
/// * If `borrowed_len_or_max == usize::MAX`, then `ptr` represents `NonZero<*const String>`
/// from `Rc::into_raw`.
/// The lifetime parameter `'a` is irrelevant in this case.
///
/// * Otherwise, `ptr` represents the `NonZero<*const u8>` data component of `&'a str`,
/// and `borrowed_len_or_max` its length.
borrowed_len_or_max: usize,
phantom: PhantomData<Result<&'a str, Rc<String>>>,
}
fn _static_assert_same_size<'a>() {
// "Instantiate" the generic function without calling it.
let _ = mem::transmute::<CowRcStr<'a>, Option<CowRcStr<'a>>>;
}
impl<'a> From<Cow<'a, str>> for CowRcStr<'a> {
#[inline]
fn from(s: Cow<'a, str>) -> Self {
match s {
Cow::Borrowed(s) => CowRcStr::from(s),
Cow::Owned(s) => CowRcStr::from(s),
}
}
}
impl<'a> From<&'a str> for CowRcStr<'a> {
#[inline]
fn from(s: &'a str) -> Self {
let len = s.len();
assert!(len < usize::MAX);
CowRcStr {
ptr: unsafe { &*(s.as_ptr() as *const ()) },
borrowed_len_or_max: len,
phantom: PhantomData,
}
}
}
impl<'a> From<String> for CowRcStr<'a> {
#[inline]
fn from(s: String) -> Self {
CowRcStr::from_rc(Rc::new(s))
}
}
impl<'a> CowRcStr<'a> {
#[inline]
fn from_rc(s: Rc<String>) -> Self {
let ptr = unsafe { &*(Rc::into_raw(s) as *const ()) };
CowRcStr {
ptr: ptr,
borrowed_len_or_max: usize::MAX,
phantom: PhantomData,
}
}
#[inline]
fn unpack(&self) -> Result<&'a str, *const String> {
if self.borrowed_len_or_max == usize::MAX {
Err(self.ptr as *const () as *const String)
} else {
unsafe {
Ok(str::from_utf8_unchecked(slice::from_raw_parts(
self.ptr as *const () as *const u8,
self.borrowed_len_or_max,
)))
}
}
}
}
impl<'a> Clone for CowRcStr<'a> {
#[inline]
fn clone(&self) -> Self {
match self.unpack() {
Err(ptr) => {
let rc = unsafe {
Rc::from_raw(ptr)
};
let new_rc = rc.clone();
mem::forget(rc); // Don't actually take ownership of this strong reference
CowRcStr::from_rc(new_rc)
}
Ok(_) => {
CowRcStr { ..*self }
}
}
}
}
impl<'a> Drop for CowRcStr<'a> {
#[inline]
fn drop(&mut self) {
if let Err(ptr) = self.unpack() {
mem::drop(unsafe {
Rc::from_raw(ptr)
})
}
}
}
impl<'a> Deref for CowRcStr<'a> {
type Target = str;
#[inline]
fn deref(&self) -> &str {
self.unpack().unwrap_or_else(|ptr| unsafe {
&**ptr
})
}
}
// Boilerplate / trivial impls below.
impl<'a> AsRef<str> for CowRcStr<'a> {
#[inline]
fn as_ref(&self) -> &str {
self
}
}
impl<'a> Borrow<str> for CowRcStr<'a> {
#[inline]
fn borrow(&self) -> &str {
self
}
}
impl<'a> Default for CowRcStr<'a> {
#[inline]
fn default() -> Self {
Self::from("")
}
}
impl<'a> hash::Hash for CowRcStr<'a> {
#[inline]
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
str::hash(self, hasher)
}
}
impl<'a, T: AsRef<str>> PartialEq<T> for CowRcStr<'a> {
#[inline]
fn eq(&self, other: &T) -> bool {
str::eq(self, other.as_ref())
}
}
impl<'a, T: AsRef<str>> PartialOrd<T> for CowRcStr<'a> {
#[inline]
fn partial_cmp(&self, other: &T) -> Option<cmp::Ordering> {
str::partial_cmp(self, other.as_ref())
}
}
impl<'a> Eq for CowRcStr<'a> {}
impl<'a> Ord for CowRcStr<'a> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
str::cmp(self, other)
}
}
impl<'a> fmt::Display for CowRcStr<'a> {
#[inline]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
str::fmt(self, formatter)
}
}
impl<'a> fmt::Debug for CowRcStr<'a> {
#[inline]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
str::fmt(self, formatter)
}
}
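// Illustrative sketch, not part of the vendored file: both packings
// described in the struct docs behave identically through `Deref`.
#[cfg(test)]
mod cow_sketch {
    use super::CowRcStr;
    #[test]
    fn packing_is_transparent() {
        let borrowed = CowRcStr::from("border"); // borrowed: length in `borrowed_len_or_max`
        let shared = CowRcStr::from(String::from("border")); // shared: tagged with usize::MAX
        assert_eq!(&*borrowed, "border");
        assert_eq!(borrowed, shared); // PartialEq compares as &str
    }
}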

64 third_party/rust/cssparser-0.19.0/src/from_bytes.rs vendored Normal file

@@ -0,0 +1,64 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Abstraction for avoiding a dependency from cssparser to an encoding library
pub trait EncodingSupport {
/// One character encoding
type Encoding;
/// https://encoding.spec.whatwg.org/#concept-encoding-get
fn from_label(ascii_label: &[u8]) -> Option<Self::Encoding>;
/// Return the UTF-8 encoding
fn utf8() -> Self::Encoding;
/// Whether the given encoding is UTF-16BE or UTF-16LE
fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool;
}
/// Determine the character encoding of a CSS stylesheet.
///
/// This is based on the presence of a BOM (Byte Order Mark), an `@charset` rule, and
/// encoding meta-information.
///
/// * `css_bytes`: A byte string.
/// * `protocol_encoding`: The encoding label, if any, defined by HTTP or equivalent protocol.
/// (e.g. via the `charset` parameter of the `Content-Type` header.)
/// * `environment_encoding`: An optional `Encoding` object for the [environment encoding]
/// (https://drafts.csswg.org/css-syntax/#environment-encoding), if any.
///
/// Returns the encoding to use.
pub fn stylesheet_encoding<E>(css: &[u8], protocol_encoding_label: Option<&[u8]>,
environment_encoding: Option<E::Encoding>)
-> E::Encoding
where E: EncodingSupport {
// https://drafts.csswg.org/css-syntax/#the-input-byte-stream
match protocol_encoding_label {
None => (),
Some(label) => match E::from_label(label) {
None => (),
Some(protocol_encoding) => return protocol_encoding
}
}
let prefix = b"@charset \"";
if css.starts_with(prefix) {
let rest = &css[prefix.len()..];
match rest.iter().position(|&b| b == b'"') {
None => (),
Some(label_length) => if rest[label_length..].starts_with(b"\";") {
let label = &rest[..label_length];
match E::from_label(label) {
None => (),
Some(charset_encoding) => if E::is_utf16_be_or_le(&charset_encoding) {
return E::utf8()
} else {
return charset_encoding
}
}
}
}
}
environment_encoding.unwrap_or_else(E::utf8)
}
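// Illustrative sketch, not part of the vendored file: a toy
// `EncodingSupport` impl that only recognizes UTF-8. A real implementation
// would delegate to rust-encoding or encoding_rs.
#[cfg(test)]
mod encoding_sketch {
    struct Utf8Only;
    impl super::EncodingSupport for Utf8Only {
        type Encoding = &'static str;
        fn from_label(label: &[u8]) -> Option<Self::Encoding> {
            if label.eq_ignore_ascii_case(b"utf-8") { Some("utf-8") } else { None }
        }
        fn utf8() -> Self::Encoding { "utf-8" }
        fn is_utf16_be_or_le(_: &Self::Encoding) -> bool { false }
    }
    #[test]
    fn charset_rule_is_honored() {
        let css = b"@charset \"utf-8\"; body {}";
        assert_eq!(super::stylesheet_encoding::<Utf8Only>(css, None, None), "utf-8");
    }
}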

123 third_party/rust/cssparser-0.19.0/src/lib.rs vendored Normal file

@@ -0,0 +1,123 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![crate_name = "cssparser"]
#![crate_type = "rlib"]
#![cfg_attr(feature = "bench", feature(test))]
#![deny(missing_docs)]
/*!
Implementation of [CSS Syntax Module Level 3](https://drafts.csswg.org/css-syntax/) for Rust.
# Input
Everything is based on `Parser` objects, which borrow a `&str` input.
If you have bytes (from a file, the network, or something)
and want to support character encodings other than UTF-8,
see the `stylesheet_encoding` function,
which can be used together with rust-encoding or encoding-rs.
# Conventions for parsing functions
* Take (at least) a `input: &mut cssparser::Parser` parameter
* Return `Result<_, ()>`
* When returning `Ok(_)`,
the function must have consumed exactly the amount of input that represents the parsed value.
* When returning `Err(())`, any amount of input may have been consumed.
As a consequence, when calling another parsing function, either:
* Any `Err(())` return value must be propagated.
This happens by definition for tail calls,
and can otherwise be done with the `try!` macro.
* Or the call must be wrapped in a `Parser::try` call.
`try` takes a closure that takes a `Parser` and returns a `Result`,
calls it once,
and returns itself that same result.
If the result is `Err`,
it restores the position inside the input to the one saved before calling the closure.
Examples:
```{rust,ignore}
// 'none' | <image>
fn parse_background_image(context: &ParserContext, input: &mut Parser)
-> Result<Option<Image>, ()> {
if input.try(|input| input.expect_ident_matching("none")).is_ok() {
Ok(None)
} else {
Image::parse(context, input).map(Some) // tail call
}
}
```
```{rust,ignore}
// [ <length> | <percentage> ] [ <length> | <percentage> ]?
fn parse_border_spacing(_context: &ParserContext, input: &mut Parser)
-> Result<(LengthOrPercentage, LengthOrPercentage), ()> {
let first = try!(LengthOrPercentage::parse(input));
let second = input.try(LengthOrPercentage::parse).unwrap_or(first);
Ok((first, second))
}
```
*/
#![recursion_limit="200"] // For color::parse_color_keyword
#[macro_use] extern crate cssparser_macros;
#[macro_use] extern crate matches;
#[macro_use] extern crate procedural_masquerade;
#[doc(hidden)] pub extern crate phf as _internal__phf;
#[cfg(test)] extern crate encoding_rs;
#[cfg(test)] extern crate difference;
#[cfg(test)] extern crate rustc_serialize;
#[cfg(feature = "serde")] extern crate serde;
#[cfg(feature = "heapsize")] #[macro_use] extern crate heapsize;
pub use cssparser_macros::*;
pub use tokenizer::{Token, SourcePosition, SourceLocation};
pub use rules_and_declarations::{parse_important};
pub use rules_and_declarations::{DeclarationParser, DeclarationListParser, parse_one_declaration};
pub use rules_and_declarations::{RuleListParser, parse_one_rule, PreciseParseError};
pub use rules_and_declarations::{AtRuleType, QualifiedRuleParser, AtRuleParser};
pub use from_bytes::{stylesheet_encoding, EncodingSupport};
pub use color::{RGBA, Color, parse_color_keyword};
pub use nth::parse_nth;
pub use serializer::{ToCss, CssStringWriter, serialize_identifier, serialize_string, TokenSerializationType};
pub use parser::{Parser, Delimiter, Delimiters, ParserState, ParseError, BasicParseError, ParserInput};
pub use unicode_range::UnicodeRange;
pub use cow_rc_str::CowRcStr;
// For macros
#[doc(hidden)] pub use macros::_internal__to_lowercase;
// For macros when used in this crate. Unsure how $crate works with procedural-masquerade.
mod cssparser { pub use _internal__phf; }
#[macro_use]
mod macros;
mod rules_and_declarations;
#[cfg(feature = "dummy_match_byte")]
mod tokenizer;
#[cfg(not(feature = "dummy_match_byte"))]
mod tokenizer {
include!(concat!(env!("OUT_DIR"), "/tokenizer.rs"));
}
mod parser;
mod from_bytes;
mod color;
mod nth;
mod serializer;
mod unicode_range;
mod cow_rc_str;
#[cfg(test)] mod tests;
#[cfg(test)] mod size_of_tests;
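A runnable sketch of the alternation convention documented above, using `Parser::try` (`try` is an ordinary method name in this 2015-edition crate; later releases renamed it `try_parse`). The `parse_none_or_number` function is illustrative, not part of the crate.

```rust
extern crate cssparser;

use cssparser::{Parser, ParserInput};

// 'none' | <number> — the failed branch rewinds, so the next branch
// sees the unconsumed input.
fn parse_none_or_number(input: &mut Parser) -> Result<Option<f32>, ()> {
    if input.try(|i| i.expect_ident_matching("none")).is_ok() {
        return Ok(None);
    }
    input.expect_number().map(Some).map_err(|_| ())
}

fn main() {
    let mut input = ParserInput::new("4.5");
    let mut parser = Parser::new(&mut input);
    assert_eq!(parse_none_or_number(&mut parser), Ok(Some(4.5)));

    let mut input = ParserInput::new("none");
    let mut parser = Parser::new(&mut input);
    assert_eq!(parse_none_or_number(&mut parser), Ok(None));
}
```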
third_party/rust/cssparser-0.19.0/src/macros.rs vendored Normal file
@@ -0,0 +1,163 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// See docs of the `procedural-masquerade` crate.
define_invoke_proc_macro!(cssparser_internal__invoke_proc_macro);
/// Expands to a `match` expression with string patterns,
/// matching case-insensitively in the ASCII range.
///
/// The patterns must not contain ASCII upper case letters. (They must already be lower-cased.)
///
/// # Example
///
/// ```rust
/// #[macro_use] extern crate cssparser;
///
/// # fn main() {} // Make doctest not wrap everything in its own main
/// # fn dummy(function_name: &String) { let _ =
/// match_ignore_ascii_case! { &function_name,
/// "rgb" => parse_rgb(..),
/// "rgba" => parse_rgba(..),
/// "hsl" => parse_hsl(..),
/// "hsla" => parse_hsla(..),
/// _ => Err(format!("unknown function: {}", function_name))
/// }
/// # ;}
/// # use std::ops::RangeFull;
/// # fn parse_rgb(_: RangeFull) -> Result<(), String> { Ok(()) }
/// # fn parse_rgba(_: RangeFull) -> Result<(), String> { Ok(()) }
/// # fn parse_hsl(_: RangeFull) -> Result<(), String> { Ok(()) }
/// # fn parse_hsla(_: RangeFull) -> Result<(), String> { Ok(()) }
/// ```
#[macro_export]
macro_rules! match_ignore_ascii_case {
( $input:expr, $( $match_body:tt )* ) => {
{
cssparser_internal__invoke_proc_macro! {
cssparser_internal__assert_ascii_lowercase__max_len!( $( $match_body )* )
}
{
// MAX_LENGTH is generated by cssparser_internal__assert_ascii_lowercase__max_len
cssparser_internal__to_lowercase!($input, MAX_LENGTH => lowercase);
// "A" is a short string that we know is different for every string pattern,
// since we've verified that none of them include ASCII upper case letters.
match lowercase.unwrap_or("A") {
$( $match_body )*
}
}
}
};
}
/// Define a function `$name(&str) -> Option<&'static $ValueType>`
///
/// The function finds a match for the input string
/// in a [`phf` map](https://github.com/sfackler/rust-phf)
/// and returns a reference to the corresponding value.
/// Matching is case-insensitive in the ASCII range.
///
/// ## Example:
///
/// ```rust
/// #[macro_use] extern crate cssparser;
///
/// # fn main() {} // Make doctest not wrap everything in its own main
///
/// fn color_rgb(input: &str) -> Option<(u8, u8, u8)> {
/// ascii_case_insensitive_phf_map! {
/// keyword -> (u8, u8, u8) = {
/// "red" => (255, 0, 0),
/// "green" => (0, 255, 0),
/// "blue" => (0, 0, 255),
/// }
/// }
/// keyword(input).cloned()
/// }
/// ```
#[macro_export]
macro_rules! ascii_case_insensitive_phf_map {
($name: ident -> $ValueType: ty = { $( $key: expr => $value: expr ),* }) => {
ascii_case_insensitive_phf_map!($name -> $ValueType = { $( $key => $value, )* })
};
($name: ident -> $ValueType: ty = { $( $key: expr => $value: expr, )* }) => {
pub fn $name(input: &str) -> Option<&'static $ValueType> {
cssparser_internal__invoke_proc_macro! {
cssparser_internal__phf_map!( ($ValueType) $( $key ($value) )+ )
}
{
cssparser_internal__invoke_proc_macro! {
cssparser_internal__max_len!( $( $key )+ )
}
// MAX_LENGTH is generated by cssparser_internal__max_len
cssparser_internal__to_lowercase!(input, MAX_LENGTH => lowercase);
lowercase.and_then(|s| MAP.get(s))
}
}
}
}
/// Implementation detail of match_ignore_ascii_case! and ascii_case_insensitive_phf_map! macros.
///
/// **This macro is not part of the public API. It can change or be removed between any versions.**
///
/// Define a local variable named `$output`
/// and assign it the result of calling `_internal__to_lowercase`
/// with a stack-allocated buffer of length `$BUFFER_SIZE`.
#[macro_export]
#[doc(hidden)]
macro_rules! cssparser_internal__to_lowercase {
($input: expr, $BUFFER_SIZE: expr => $output: ident) => {
// mem::uninitialized() is ok because `buffer` is only used in `_internal__to_lowercase`,
// which initializes with `copy_from_slice` the part of the buffer it uses,
// before it uses it.
#[allow(unsafe_code)]
let mut buffer: [u8; $BUFFER_SIZE] = unsafe {
::std::mem::uninitialized()
};
let input: &str = $input;
let $output = $crate::_internal__to_lowercase(&mut buffer, input);
}
}
/// Implementation detail of match_ignore_ascii_case! and ascii_case_insensitive_phf_map! macros.
///
/// **This function is not part of the public API. It can change or be removed between any versions.**
///
/// If `input` is larger than buffer, return `None`.
/// Otherwise, return `input` ASCII-lowercased, using `buffer` as temporary space if necessary.
#[doc(hidden)]
#[allow(non_snake_case)]
pub fn _internal__to_lowercase<'a>(buffer: &'a mut [u8], input: &'a str) -> Option<&'a str> {
if let Some(buffer) = buffer.get_mut(..input.len()) {
if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'...b'Z')) {
buffer.copy_from_slice(input.as_bytes());
::std::ascii::AsciiExt::make_ascii_lowercase(&mut buffer[first_uppercase..]);
// `buffer` was initialized to a copy of `input` (which is &str so well-formed UTF-8)
// then lowercased (which preserves UTF-8 well-formedness)
unsafe {
Some(::std::str::from_utf8_unchecked(buffer))
}
} else {
// Input is already lower-case
Some(input)
}
} else {
// Input is longer than buffer, which has the length of the longest expected string:
// none of the expected strings would match.
None
}
}
#[cfg(feature = "dummy_match_byte")]
macro_rules! match_byte {
($value:expr, $($rest:tt)* ) => {
match $value {
$(
$rest
)+
}
};
}
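A self-contained sketch of `match_ignore_ascii_case!` in action (the doctest above shows the same shape; this one compiles and runs on its own, with illustrative keywords):

```rust
#[macro_use] extern crate cssparser;

// Patterns must already be lower-case; the input may be any case.
fn parse_keyword(word: &str) -> Option<u32> {
    match_ignore_ascii_case! { word,
        "zero" => Some(0),
        "one" => Some(1),
        _ => None
    }
}

fn main() {
    assert_eq!(parse_keyword("ONE"), Some(1));
    assert_eq!(parse_keyword("One"), Some(1));
    assert_eq!(parse_keyword("two"), None);
}
```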
third_party/rust/cssparser-0.19.0/src/nth.rs vendored Normal file
@@ -0,0 +1,100 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::AsciiExt;
use super::{Token, Parser, BasicParseError};
/// Parse the *An+B* notation, as found in the `:nth-child()` selector.
/// The input is typically the arguments of a function,
/// in which case the caller needs to check if the arguments' parser is exhausted.
/// Return `Ok((A, B))`, or a `BasicParseError` for a syntax error.
pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> {
// FIXME: remove .clone() when lifetimes are non-lexical.
match input.next()?.clone() {
Token::Number { int_value: Some(b), .. } => {
Ok((0, b))
}
Token::Dimension { int_value: Some(a), unit, .. } => {
match_ignore_ascii_case! {
&unit,
"n" => Ok(try!(parse_b(input, a))),
"n-" => Ok(try!(parse_signless_b(input, a, -1))),
_ => match parse_n_dash_digits(&*unit) {
Ok(b) => Ok((a, b)),
Err(()) => Err(BasicParseError::UnexpectedToken(Token::Ident(unit.clone())))
}
}
}
Token::Ident(value) => {
match_ignore_ascii_case! { &value,
"even" => Ok((2, 0)),
"odd" => Ok((2, 1)),
"n" => Ok(try!(parse_b(input, 1))),
"-n" => Ok(try!(parse_b(input, -1))),
"n-" => Ok(try!(parse_signless_b(input, 1, -1))),
"-n-" => Ok(try!(parse_signless_b(input, -1, -1))),
_ => {
let (slice, a) = if value.starts_with("-") {
(&value[1..], -1)
} else {
(&*value, 1)
};
match parse_n_dash_digits(slice) {
Ok(b) => Ok((a, b)),
Err(()) => Err(BasicParseError::UnexpectedToken(Token::Ident(value.clone())))
}
}
}
}
// FIXME: remove .clone() when lifetimes are non-lexical.
Token::Delim('+') => match input.next_including_whitespace()?.clone() {
Token::Ident(value) => {
match_ignore_ascii_case! { &value,
"n" => parse_b(input, 1),
"n-" => parse_signless_b(input, 1, -1),
_ => match parse_n_dash_digits(&*value) {
Ok(b) => Ok((1, b)),
Err(()) => Err(BasicParseError::UnexpectedToken(Token::Ident(value.clone())))
}
}
}
token => Err(BasicParseError::UnexpectedToken(token)),
},
token => Err(BasicParseError::UnexpectedToken(token)),
}
}
fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
let start = input.state();
match input.next() {
Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1),
Ok(&Token::Delim('-')) => parse_signless_b(input, a, -1),
Ok(&Token::Number { has_sign: true, int_value: Some(b), .. }) => Ok((a, b)),
_ => {
input.reset(&start);
Ok((a, 0))
}
}
}
fn parse_signless_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32, b_sign: i32) -> Result<(i32, i32), BasicParseError<'i>> {
match *input.next()? {
Token::Number { has_sign: false, int_value: Some(b), .. } => Ok((a, b_sign * b)),
ref token => Err(BasicParseError::UnexpectedToken(token.clone()))
}
}
fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
if string.len() >= 3
&& string[..2].eq_ignore_ascii_case("n-")
&& string[2..].chars().all(|c| matches!(c, '0'...'9'))
{
Ok(string[1..].parse().unwrap()) // Include the minus sign
} else {
Err(())
}
}
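Driving `parse_nth` by hand, as a quick sketch (`ParserInput` and `Parser` are the types defined in parser.rs below):

```rust
extern crate cssparser;

use cssparser::{Parser, ParserInput, parse_nth};

fn main() {
    // `2n+1` matches the odd elements, same as the `odd` keyword.
    let mut input = ParserInput::new("2n+1");
    assert_eq!(parse_nth(&mut Parser::new(&mut input)), Ok((2, 1)));

    let mut input = ParserInput::new("even");
    assert_eq!(parse_nth(&mut Parser::new(&mut input)), Ok((2, 0)));
}
```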
third_party/rust/cssparser-0.19.0/src/parser.rs vendored Normal file
@@ -0,0 +1,839 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cow_rc_str::CowRcStr;
use std::ops::Range;
use std::ascii::AsciiExt;
use std::ops::BitOr;
use tokenizer::{Token, Tokenizer, SourcePosition, SourceLocation};
/// A capture of the internal state of a `Parser` (including the position within the input),
/// obtained from the `Parser::state` method.
///
/// Can be used with the `Parser::reset` method to restore that state.
/// Should only be used with the `Parser` instance it came from.
#[derive(Debug, Clone)]
pub struct ParserState {
pub(crate) position: usize,
pub(crate) current_line_start_position: usize,
pub(crate) current_line_number: u32,
pub(crate) at_start_of: Option<BlockType>,
}
impl ParserState {
/// The position from the start of the input, counted in UTF-8 bytes.
#[inline]
pub fn position(&self) -> SourcePosition {
SourcePosition(self.position)
}
/// The line number and column number
#[inline]
pub fn source_location(&self) -> SourceLocation {
SourceLocation {
line: self.current_line_number,
column: (self.position - self.current_line_start_position) as u32,
}
}
}
/// The fundamental parsing errors that can be triggered by built-in parsing routines.
#[derive(Clone, Debug, PartialEq)]
pub enum BasicParseError<'a> {
/// An unexpected token was encountered.
UnexpectedToken(Token<'a>),
/// The end of the input was encountered unexpectedly.
EndOfInput,
/// An `@` rule was encountered that was invalid.
AtRuleInvalid(CowRcStr<'a>),
/// The body of an `@` rule was invalid.
AtRuleBodyInvalid,
/// A qualified rule was encountered that was invalid.
QualifiedRuleInvalid,
}
impl<'a, T> From<BasicParseError<'a>> for ParseError<'a, T> {
fn from(this: BasicParseError<'a>) -> ParseError<'a, T> {
ParseError::Basic(this)
}
}
/// Extensible parse errors that can be encountered by client parsing implementations.
#[derive(Clone, Debug, PartialEq)]
pub enum ParseError<'a, T: 'a> {
/// A fundamental parse error from a built-in parsing routine.
Basic(BasicParseError<'a>),
/// A parse error reported by downstream consumer code.
Custom(T),
}
impl<'a, T> ParseError<'a, T> {
/// Extract the fundamental parse error from an extensible error.
pub fn basic(self) -> BasicParseError<'a> {
match self {
ParseError::Basic(e) => e,
ParseError::Custom(_) => panic!("Not a basic parse error"),
}
}
}
/// The owned input for a parser.
pub struct ParserInput<'i> {
tokenizer: Tokenizer<'i>,
cached_token: Option<CachedToken<'i>>,
}
struct CachedToken<'i> {
token: Token<'i>,
start_position: SourcePosition,
end_state: ParserState,
}
impl<'i> ParserInput<'i> {
/// Create a new input for a parser.
pub fn new(input: &'i str) -> ParserInput<'i> {
ParserInput {
tokenizer: Tokenizer::new(input),
cached_token: None,
}
}
#[inline]
fn cached_token_ref(&self) -> &Token<'i> {
&self.cached_token.as_ref().unwrap().token
}
}
/// A CSS parser that borrows its `&str` input,
/// yields `Token`s,
/// and keeps track of nested blocks and functions.
pub struct Parser<'i: 't, 't> {
input: &'t mut ParserInput<'i>,
/// If `Some(_)`, .parse_nested_block() can be called.
at_start_of: Option<BlockType>,
/// For parsers from `parse_until` or `parse_nested_block`
stop_before: Delimiters,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub(crate) enum BlockType {
Parenthesis,
SquareBracket,
CurlyBracket,
}
impl BlockType {
fn opening(token: &Token) -> Option<BlockType> {
match *token {
Token::Function(_) |
Token::ParenthesisBlock => Some(BlockType::Parenthesis),
Token::SquareBracketBlock => Some(BlockType::SquareBracket),
Token::CurlyBracketBlock => Some(BlockType::CurlyBracket),
_ => None
}
}
fn closing(token: &Token) -> Option<BlockType> {
match *token {
Token::CloseParenthesis => Some(BlockType::Parenthesis),
Token::CloseSquareBracket => Some(BlockType::SquareBracket),
Token::CloseCurlyBracket => Some(BlockType::CurlyBracket),
_ => None
}
}
}
/// A set of characters, to be used with the `Parser::parse_until*` methods.
///
/// The union of two sets can be obtained with the `|` operator. Example:
///
/// ```{rust,ignore}
/// input.parse_until_before(Delimiter::CurlyBracketBlock | Delimiter::Semicolon)
/// ```
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Delimiters {
bits: u8,
}
/// `Delimiters` constants.
#[allow(non_upper_case_globals, non_snake_case)]
pub mod Delimiter {
use super::Delimiters;
/// The empty delimiter set
pub const None: Delimiters = Delimiters { bits: 0 };
/// The delimiter set with only the `{` opening curly bracket
pub const CurlyBracketBlock: Delimiters = Delimiters { bits: 1 << 1 };
/// The delimiter set with only the `;` semicolon
pub const Semicolon: Delimiters = Delimiters { bits: 1 << 2 };
/// The delimiter set with only the `!` exclamation point
pub const Bang: Delimiters = Delimiters { bits: 1 << 3 };
/// The delimiter set with only the `,` comma
pub const Comma: Delimiters = Delimiters { bits: 1 << 4 };
}
#[allow(non_upper_case_globals, non_snake_case)]
mod ClosingDelimiter {
use super::Delimiters;
pub const CloseCurlyBracket: Delimiters = Delimiters { bits: 1 << 5 };
pub const CloseSquareBracket: Delimiters = Delimiters { bits: 1 << 6 };
pub const CloseParenthesis: Delimiters = Delimiters { bits: 1 << 7 };
}
impl BitOr<Delimiters> for Delimiters {
type Output = Delimiters;
fn bitor(self, other: Delimiters) -> Delimiters {
Delimiters { bits: self.bits | other.bits }
}
}
impl Delimiters {
fn contains(self, other: Delimiters) -> bool {
(self.bits & other.bits) != 0
}
fn from_byte(byte: Option<u8>) -> Delimiters {
match byte {
Some(b';') => Delimiter::Semicolon,
Some(b'!') => Delimiter::Bang,
Some(b',') => Delimiter::Comma,
Some(b'{') => Delimiter::CurlyBracketBlock,
Some(b'}') => ClosingDelimiter::CloseCurlyBracket,
Some(b']') => ClosingDelimiter::CloseSquareBracket,
Some(b')') => ClosingDelimiter::CloseParenthesis,
_ => Delimiter::None,
}
}
}
impl<'i: 't, 't> Parser<'i, 't> {
/// Create a new parser
#[inline]
pub fn new(input: &'t mut ParserInput<'i>) -> Parser<'i, 't> {
Parser {
input: input,
at_start_of: None,
stop_before: Delimiter::None,
}
}
/// Return the current line that is being parsed.
pub fn current_line(&self) -> &'i str {
self.input.tokenizer.current_source_line()
}
/// Check whether the input is exhausted. That is, whether `.next()` would return
/// `Err(BasicParseError::EndOfInput)`.
///
/// This ignores whitespace and comments.
#[inline]
pub fn is_exhausted(&mut self) -> bool {
self.expect_exhausted().is_ok()
}
/// Check whether the input is exhausted. That is, whether `.next()` would return
/// `Err(BasicParseError::EndOfInput)`.
/// Return a `Result` so that the `try!` macro can be used: `try!(input.expect_exhausted())`
///
/// This ignores whitespace and comments.
#[inline]
pub fn expect_exhausted(&mut self) -> Result<(), BasicParseError<'i>> {
let start = self.state();
let result = match self.next() {
Err(BasicParseError::EndOfInput) => Ok(()),
Err(e) => unreachable!("Unexpected error encountered: {:?}", e),
Ok(t) => Err(BasicParseError::UnexpectedToken(t.clone())),
};
self.reset(&start);
result
}
/// Return the current position within the input.
///
/// This can be used with the `Parser::slice` and `slice_from` methods.
#[inline]
pub fn position(&self) -> SourcePosition {
self.input.tokenizer.position()
}
/// The current line number and column number.
#[inline]
pub fn current_source_location(&self) -> SourceLocation {
self.input.tokenizer.current_source_location()
}
/// Return the current internal state of the parser (including position within the input).
///
/// This state can later be restored with the `Parser::reset` method.
#[inline]
pub fn state(&self) -> ParserState {
ParserState {
at_start_of: self.at_start_of,
.. self.input.tokenizer.state()
}
}
    /// Restore the internal state of the parser (including position within the input)
    /// to what was previously saved by the `Parser::state` method.
    ///
    /// Should only be used with `ParserState` values from the same `Parser` instance.
#[inline]
pub fn reset(&mut self, state: &ParserState) {
self.input.tokenizer.reset(state);
self.at_start_of = state.at_start_of;
}
/// Start looking for `var()` functions. (See the `.seen_var_functions()` method.)
#[inline]
pub fn look_for_var_functions(&mut self) {
self.input.tokenizer.look_for_var_functions()
}
    /// Return whether a `var()` function has been seen by the tokenizer since
    /// `look_for_var_functions` was called, and stop looking.
#[inline]
pub fn seen_var_functions(&mut self) -> bool {
self.input.tokenizer.seen_var_functions()
}
/// Start looking for viewport percentage lengths. (See the `seen_viewport_percentages`
/// method.)
#[inline]
pub fn look_for_viewport_percentages(&mut self) {
self.input.tokenizer.look_for_viewport_percentages()
}
/// Return whether a `vh`, `vw`, `vmin`, or `vmax` dimension has been seen by the tokenizer
/// since `look_for_viewport_percentages` was called, and stop looking.
#[inline]
pub fn seen_viewport_percentages(&mut self) -> bool {
self.input.tokenizer.seen_viewport_percentages()
}
/// Execute the given closure, passing it the parser.
/// If the result (returned unchanged) is `Err`,
/// the internal state of the parser (including position within the input)
/// is restored to what it was before the call.
#[inline]
pub fn try<F, T, E>(&mut self, thing: F) -> Result<T, E>
where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E> {
let start = self.state();
let result = thing(self);
if result.is_err() {
self.reset(&start)
}
result
}
/// Return a slice of the CSS input
#[inline]
pub fn slice(&self, range: Range<SourcePosition>) -> &'i str {
self.input.tokenizer.slice(range)
}
/// Return a slice of the CSS input, from the given position to the current one.
#[inline]
pub fn slice_from(&self, start_position: SourcePosition) -> &'i str {
self.input.tokenizer.slice_from(start_position)
}
    /// Return the next token in the input that is neither whitespace nor a comment,
/// and advance the position accordingly.
///
/// After returning a `Function`, `ParenthesisBlock`,
/// `CurlyBracketBlock`, or `SquareBracketBlock` token,
/// the next call will skip until after the matching `CloseParenthesis`,
/// `CloseCurlyBracket`, or `CloseSquareBracket` token.
///
/// See the `Parser::parse_nested_block` method to parse the content of functions or blocks.
///
/// This only returns a closing token when it is unmatched (and therefore an error).
pub fn next(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
loop {
match self.next_including_whitespace_and_comments() {
Err(e) => return Err(e),
Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) => {},
_ => break
}
}
Ok(self.input.cached_token_ref())
}
/// Same as `Parser::next`, but does not skip whitespace tokens.
pub fn next_including_whitespace(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
loop {
match self.next_including_whitespace_and_comments() {
Err(e) => return Err(e),
Ok(&Token::Comment(_)) => {},
_ => break
}
}
Ok(self.input.cached_token_ref())
}
/// Same as `Parser::next`, but does not skip whitespace or comment tokens.
///
/// **Note**: This should only be used in contexts like a CSS pre-processor
/// where comments are preserved.
/// When parsing higher-level values, per the CSS Syntax specification,
/// comments should always be ignored between tokens.
pub fn next_including_whitespace_and_comments(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
if let Some(block_type) = self.at_start_of.take() {
consume_until_end_of_block(block_type, &mut self.input.tokenizer);
}
let byte = self.input.tokenizer.next_byte();
if self.stop_before.contains(Delimiters::from_byte(byte)) {
return Err(BasicParseError::EndOfInput)
}
let token_start_position = self.input.tokenizer.position();
let token;
match self.input.cached_token {
Some(ref cached_token)
if cached_token.start_position == token_start_position => {
self.input.tokenizer.reset(&cached_token.end_state);
match cached_token.token {
Token::Dimension { ref unit, .. } => self.input.tokenizer.see_dimension(unit),
Token::Function(ref name) => self.input.tokenizer.see_function(name),
_ => {}
}
token = &cached_token.token
}
_ => {
let new_token = self.input.tokenizer.next().map_err(|()| BasicParseError::EndOfInput)?;
self.input.cached_token = Some(CachedToken {
token: new_token,
start_position: token_start_position,
end_state: self.input.tokenizer.state(),
});
token = self.input.cached_token_ref()
}
}
if let Some(block_type) = BlockType::opening(token) {
self.at_start_of = Some(block_type);
}
Ok(token)
}
    /// Have the given closure parse something, then check that the input is exhausted.
    /// The result is overridden to `Err(_)` if some input remains.
///
/// This can help tell e.g. `color: green;` from `color: green 4px;`
#[inline]
pub fn parse_entirely<F, T, E>(&mut self, parse: F) -> Result<T, ParseError<'i, E>>
where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, ParseError<'i, E>> {
let result = parse(self)?;
self.expect_exhausted()?;
Ok(result)
}
/// Parse a list of comma-separated values, all with the same syntax.
///
/// The given closure is called repeatedly with a "delimited" parser
/// (see the `Parser::parse_until_before` method)
    /// so that it cannot consume the input past a comma at this block/function nesting level.
///
/// Successful results are accumulated in a vector.
///
    /// This method returns `Err(_)` the first time that a closure call does,
/// or if a closure call leaves some input before the next comma or the end of the input.
#[inline]
pub fn parse_comma_separated<F, T, E>(&mut self, mut parse_one: F) -> Result<Vec<T>, ParseError<'i, E>>
where F: for<'tt> FnMut(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
let mut values = vec![];
loop {
values.push(self.parse_until_before(Delimiter::Comma, &mut parse_one)?);
match self.next() {
Err(_) => return Ok(values),
Ok(&Token::Comma) => continue,
Ok(_) => unreachable!(),
}
}
}
/// Parse the content of a block or function.
///
/// This method panics if the last token yielded by this parser
/// (from one of the `next*` methods)
    /// is not one that marks the start of a block or function:
/// a `Function`, `ParenthesisBlock`, `CurlyBracketBlock`, or `SquareBracketBlock`.
///
/// The given closure is called with a "delimited" parser
/// that stops at the end of the block or function (at the matching closing token).
///
    /// The result is overridden to `Err(_)` if the closure leaves some input before that point.
#[inline]
pub fn parse_nested_block<F, T, E>(&mut self, parse: F) -> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
parse_nested_block(self, parse)
}
    /// Limit parsing to the input up to a given delimiter or the end of the input. (E.g.
/// a semicolon for a property value.)
///
/// The given closure is called with a "delimited" parser
/// that stops before the first character at this block/function nesting level
/// that matches the given set of delimiters, or at the end of the input.
///
    /// The result is overridden to `Err(_)` if the closure leaves some input before that point.
#[inline]
pub fn parse_until_before<F, T, E>(&mut self, delimiters: Delimiters, parse: F)
-> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
parse_until_before(self, delimiters, parse)
}
/// Like `parse_until_before`, but also consume the delimiter token.
///
    /// This can be useful when you don't need to know which delimiter it was
    /// (e.g. if there is only one in the given set)
/// or if it was there at all (as opposed to reaching the end of the input).
#[inline]
pub fn parse_until_after<F, T, E>(&mut self, delimiters: Delimiters, parse: F)
-> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
parse_until_after(self, delimiters, parse)
}
/// Parse a <whitespace-token> and return its value.
#[inline]
pub fn expect_whitespace(&mut self) -> Result<&'i str, BasicParseError<'i>> {
match *self.next_including_whitespace()? {
Token::WhiteSpace(value) => Ok(value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <ident-token> and return the unescaped value.
#[inline]
pub fn expect_ident(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
match *self.next()? {
Token::Ident(ref value) => Ok(value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
    /// Like `expect_ident`, but clone the `CowRcStr`.
#[inline]
pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
self.expect_ident().map(|s| s.clone())
}
    /// Parse a <ident-token> whose unescaped value is an ASCII case-insensitive match for the given value.
#[inline]
pub fn expect_ident_matching(&mut self, expected_value: &str) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Ident(ref value) if value.eq_ignore_ascii_case(expected_value) => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <string-token> and return the unescaped value.
#[inline]
pub fn expect_string(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
match *self.next()? {
Token::QuotedString(ref value) => Ok(value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
    /// Like `expect_string`, but clone the `CowRcStr`.
#[inline]
pub fn expect_string_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
self.expect_string().map(|s| s.clone())
}
/// Parse either a <ident-token> or a <string-token>, and return the unescaped value.
#[inline]
pub fn expect_ident_or_string(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
match *self.next()? {
Token::Ident(ref value) => Ok(value),
Token::QuotedString(ref value) => Ok(value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <url-token> and return the unescaped value.
#[inline]
pub fn expect_url(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
// FIXME: revert early returns when lifetimes are non-lexical
match *self.next()? {
Token::UnquotedUrl(ref value) => return Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {}
ref t => return Err(BasicParseError::UnexpectedToken(t.clone()))
}
self.parse_nested_block(|input| input.expect_string().map_err(ParseError::Basic).map(|s| s.clone()))
.map_err(ParseError::<()>::basic)
}
/// Parse either a <url-token> or a <string-token>, and return the unescaped value.
#[inline]
pub fn expect_url_or_string(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
// FIXME: revert early returns when lifetimes are non-lexical
match *self.next()? {
Token::UnquotedUrl(ref value) => return Ok(value.clone()),
Token::QuotedString(ref value) => return Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {}
ref t => return Err(BasicParseError::UnexpectedToken(t.clone()))
}
self.parse_nested_block(|input| input.expect_string().map_err(ParseError::Basic).map(|s| s.clone()))
.map_err(ParseError::<()>::basic)
}
    /// Parse a <number-token> and return the number value.
#[inline]
pub fn expect_number(&mut self) -> Result<f32, BasicParseError<'i>> {
match *self.next()? {
Token::Number { value, .. } => Ok(value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <number-token> that does not have a fractional part, and return the integer value.
#[inline]
pub fn expect_integer(&mut self) -> Result<i32, BasicParseError<'i>> {
match *self.next()? {
Token::Number { int_value: Some(int_value), .. } => {
Ok(int_value)
}
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <percentage-token> and return the value.
/// `0%` and `100%` map to `0.0` and `1.0` (not `100.0`), respectively.
#[inline]
pub fn expect_percentage(&mut self) -> Result<f32, BasicParseError<'i>> {
match *self.next()? {
Token::Percentage { unit_value, .. } => Ok(unit_value),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `:` <colon-token>.
#[inline]
pub fn expect_colon(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Colon => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `;` <semicolon-token>.
#[inline]
pub fn expect_semicolon(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Semicolon => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `,` <comma-token>.
#[inline]
pub fn expect_comma(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Comma => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <delim-token> with the given value.
#[inline]
pub fn expect_delim(&mut self, expected_value: char) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Delim(value) if value == expected_value => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `{ /* ... */ }` curly brackets block.
///
/// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
#[inline]
pub fn expect_curly_bracket_block(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::CurlyBracketBlock => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `[ /* ... */ ]` square brackets block.
///
/// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
#[inline]
pub fn expect_square_bracket_block(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::SquareBracketBlock => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a `( /* ... */ )` parenthesis block.
///
/// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
#[inline]
pub fn expect_parenthesis_block(&mut self) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::ParenthesisBlock => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse a <function> token and return its name.
///
/// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
#[inline]
pub fn expect_function(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
match *self.next()? {
Token::Function(ref name) => Ok(name),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
    /// Parse a <function> token whose name is an ASCII case-insensitive match for the given value.
///
/// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
#[inline]
pub fn expect_function_matching(&mut self, expected_name: &str) -> Result<(), BasicParseError<'i>> {
match *self.next()? {
Token::Function(ref name) if name.eq_ignore_ascii_case(expected_name) => Ok(()),
ref t => Err(BasicParseError::UnexpectedToken(t.clone()))
}
}
/// Parse the input until exhaustion and check that it contains no “error” token.
///
/// See `Token::is_parse_error`. This also checks nested blocks and functions recursively.
#[inline]
pub fn expect_no_error_token(&mut self) -> Result<(), BasicParseError<'i>> {
loop {
match self.next_including_whitespace_and_comments() {
Ok(&Token::Function(_)) |
Ok(&Token::ParenthesisBlock) |
Ok(&Token::SquareBracketBlock) |
Ok(&Token::CurlyBracketBlock) => {}
Ok(token) => {
if token.is_parse_error() {
// FIXME: maybe these should be separate variants of BasicParseError instead?
return Err(BasicParseError::UnexpectedToken(token.clone()))
}
continue
}
Err(_) => return Ok(())
}
let result = self.parse_nested_block(|input| input.expect_no_error_token()
.map_err(|e| ParseError::Basic(e)));
result.map_err(ParseError::<()>::basic)?
}
}
}
pub fn parse_until_before<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
delimiters: Delimiters,
parse: F)
-> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
let delimiters = parser.stop_before | delimiters;
let result;
// Introduce a new scope to limit duration of nested_parsers borrow
{
let mut delimited_parser = Parser {
input: parser.input,
at_start_of: parser.at_start_of.take(),
stop_before: delimiters,
};
result = delimited_parser.parse_entirely(parse);
if let Some(block_type) = delimited_parser.at_start_of {
consume_until_end_of_block(block_type, &mut delimited_parser.input.tokenizer);
}
}
// FIXME: have a special-purpose tokenizer method for this that does less work.
loop {
if delimiters.contains(Delimiters::from_byte((parser.input.tokenizer).next_byte())) {
break
}
if let Ok(token) = (parser.input.tokenizer).next() {
if let Some(block_type) = BlockType::opening(&token) {
consume_until_end_of_block(block_type, &mut parser.input.tokenizer);
}
} else {
break
}
}
result
}
pub fn parse_until_after<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
delimiters: Delimiters,
parse: F)
-> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
let result = parser.parse_until_before(delimiters, parse);
let next_byte = (parser.input.tokenizer).next_byte();
if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) {
debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
(parser.input.tokenizer).advance(1);
if next_byte == Some(b'{') {
consume_until_end_of_block(BlockType::CurlyBracket, &mut parser.input.tokenizer);
}
}
result
}
pub fn parse_nested_block<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, parse: F)
-> Result <T, ParseError<'i, E>>
where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
let block_type = parser.at_start_of.take().expect("\
A nested parser can only be created when a Function, \
ParenthesisBlock, SquareBracketBlock, or CurlyBracketBlock \
token was just consumed.\
");
let closing_delimiter = match block_type {
BlockType::CurlyBracket => ClosingDelimiter::CloseCurlyBracket,
BlockType::SquareBracket => ClosingDelimiter::CloseSquareBracket,
BlockType::Parenthesis => ClosingDelimiter::CloseParenthesis,
};
let result;
// Introduce a new scope to limit duration of nested_parsers borrow
{
let mut nested_parser = Parser {
input: parser.input,
at_start_of: None,
stop_before: closing_delimiter,
};
result = nested_parser.parse_entirely(parse);
if let Some(block_type) = nested_parser.at_start_of {
consume_until_end_of_block(block_type, &mut nested_parser.input.tokenizer);
}
}
consume_until_end_of_block(block_type, &mut parser.input.tokenizer);
result
}
fn consume_until_end_of_block(block_type: BlockType, tokenizer: &mut Tokenizer) {
let mut stack = vec![block_type];
// FIXME: have a special-purpose tokenizer method for this that does less work.
while let Ok(ref token) = tokenizer.next() {
if let Some(b) = BlockType::closing(token) {
if *stack.last().unwrap() == b {
stack.pop();
if stack.is_empty() {
return;
}
}
}
if let Some(block_type) = BlockType::opening(token) {
stack.push(block_type);
}
}
}
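Tying the machinery above together, a sketch (the function name and the `()` custom error type are illustrative) that matches a function token with `expect_function_matching`, descends into it with `parse_nested_block`, and collects a list with `parse_comma_separated`:

```rust
extern crate cssparser;

use cssparser::{ParseError, Parser, ParserInput};

// rgb( <integer> , <integer> , <integer> )
fn parse_rgb<'i, 't>(parser: &mut Parser<'i, 't>)
                     -> Result<(i32, i32, i32), ParseError<'i, ()>> {
    parser.expect_function_matching("rgb")?;
    parser.parse_nested_block(|args| {
        let r = args.expect_integer()?;
        args.expect_comma()?;
        let g = args.expect_integer()?;
        args.expect_comma()?;
        let b = args.expect_integer()?;
        Ok((r, g, b))
    })
}

fn main() {
    let mut input = ParserInput::new("rgb(64, 128, 255)");
    assert_eq!(parse_rgb(&mut Parser::new(&mut input)), Ok((64, 128, 255)));

    // `parse_comma_separated` hands the closure a parser that is
    // delimited at each top-level comma.
    let mut input = ParserInput::new("a, b, c");
    let idents = Parser::new(&mut input)
        .parse_comma_separated::<_, _, ()>(|i| {
            let name = i.expect_ident()?;
            Ok((**name).to_owned())
        });
    assert_eq!(idents.unwrap(), vec!["a", "b", "c"]);
}
```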
third_party/rust/cssparser-0.19.0/src/rules_and_declarations.rs vendored Normal file
@@ -0,0 +1,525 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// https://drafts.csswg.org/css-syntax/#parsing
use cow_rc_str::CowRcStr;
use parser::{parse_until_before, parse_until_after, parse_nested_block, ParserState};
use std::ascii::AsciiExt;
use super::{Token, Parser, Delimiter, ParseError, BasicParseError, SourceLocation};
/// Parse `!important`.
///
/// Typical usage is `input.try(parse_important).is_ok()`
/// at the end of a `DeclarationParser::parse_value` implementation.
pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
input.expect_delim('!')?;
input.expect_ident_matching("important")
}
/// The return value for `AtRuleParser::parse_prelude`.
/// Indicates whether the at-rule is expected to have a `{ /* ... */ }` block
/// or end with a `;` semicolon.
pub enum AtRuleType<P, R> {
/// The at-rule is expected to end with a `;` semicolon. Example: `@import`.
///
/// The value is the finished representation of an at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
WithoutBlock(R),
/// The at-rule is expected to have a `{ /* ... */ }` block. Example: `@media`
///
/// The value is the representation of the "prelude" part of the rule.
WithBlock(P),
/// The at-rule may either have a block or end with a semicolon.
///
/// This is mostly for testing. As of this writing no real CSS at-rule behaves like this.
///
/// The value is the representation of the "prelude" part of the rule.
OptionalBlock(P),
}
/// A trait to provide various parsing of declaration values.
///
/// For example, there could be different implementations for property declarations in style rules
/// and for descriptors in `@font-face` rules.
pub trait DeclarationParser<'i> {
/// The finished representation of a declaration.
type Declaration;
/// The error type that is included in the ParseError value that can be returned.
type Error: 'i;
/// Parse the value of a declaration with the given `name`.
///
/// Return the finished representation for the declaration
/// as returned by `DeclarationListParser::next`,
/// or `Err(_)` to ignore the entire declaration as invalid.
///
/// Declaration name matching should be case-insensitive in the ASCII range.
/// This can be done with `std::ascii::AsciiExt::eq_ignore_ascii_case`,
/// or with the `match_ignore_ascii_case!` macro.
///
/// The given `input` is a "delimited" parser
/// that ends wherever the declaration value should end.
/// (In declaration lists, before the next semicolon or end of the current block.)
///
/// If `!important` can be used in a given context,
/// `input.try(parse_important).is_ok()` should be used at the end
/// of the implementation of this method and the result should be part of the return value.
fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<Self::Declaration, ParseError<'i, Self::Error>>;
}
/// A trait to provide various parsing of at-rules.
///
/// For example, there could be different implementations for top-level at-rules
/// (`@media`, `@font-face`, …)
/// and for page-margin rules inside `@page`.
///
/// Default implementations that reject all at-rules are provided,
/// so that a minimal `impl AtRuleParser<'i> for ... {}` can be used
/// with `DeclarationListParser` to parse a declaration list that contains no at-rules.
pub trait AtRuleParser<'i> {
/// The intermediate representation of an at-rule prelude.
type Prelude;
/// The finished representation of an at-rule.
type AtRule;
/// The error type that is included in the ParseError value that can be returned.
type Error: 'i;
/// Parse the prelude of an at-rule with the given `name`.
///
/// Return the representation of the prelude and the type of at-rule,
/// or `Err(_)` to ignore the entire at-rule as invalid.
///
/// See `AtRuleType`'s documentation for the return value.
///
/// The prelude is the part after the at-keyword
/// and before the `;` semicolon or `{ /* ... */ }` block.
///
/// At-rule name matching should be case-insensitive in the ASCII range.
/// This can be done with `std::ascii::AsciiExt::eq_ignore_ascii_case`,
/// or with the `match_ignore_ascii_case!` macro.
///
/// The given `input` is a "delimited" parser
/// that ends wherever the prelude should end.
/// (Before the next semicolon, the next `{`, or the end of the current block.)
fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<AtRuleType<Self::Prelude, Self::AtRule>, ParseError<'i, Self::Error>> {
let _ = input;
Err(ParseError::Basic(BasicParseError::AtRuleInvalid(name)))
}
/// Parse the content of a `{ /* ... */ }` block for the body of the at-rule.
///
/// Return the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
/// or `Err(_)` to ignore the entire at-rule as invalid.
///
/// This is only called when `parse_prelude` returned `WithBlock` or `OptionalBlock`,
/// and a block was indeed found following the prelude.
fn parse_block<'t>(&mut self, prelude: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::AtRule, ParseError<'i, Self::Error>> {
let _ = prelude;
let _ = input;
Err(ParseError::Basic(BasicParseError::AtRuleBodyInvalid))
}
/// An `OptionalBlock` prelude was followed by `;`.
///
/// Convert the prelude into the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
fn rule_without_block(&mut self, prelude: Self::Prelude) -> Self::AtRule {
let _ = prelude;
panic!("The `AtRuleParser::rule_without_block` method must be overriden \
if `AtRuleParser::parse_prelude` ever returns `AtRuleType::OptionalBlock`.")
}
}
/// A trait to provide various parsing of qualified rules.
///
/// For example, there could be different implementations
/// for top-level qualified rules (i.e. style rules with Selectors as prelude)
/// and for qualified rules inside `@keyframes` (keyframe rules with keyframe selectors as prelude).
///
/// Default implementations that reject all qualified rules are provided,
/// so that a minimal `impl QualifiedRuleParser<'i> for ... {}` can be used,
/// for example with `RuleListParser` to parse a rule list with only at-rules
/// (such as inside `@font-feature-values`).
pub trait QualifiedRuleParser<'i> {
/// The intermediate representation of a qualified rule prelude.
type Prelude;
/// The finished representation of a qualified rule.
type QualifiedRule;
/// The error type that is included in the ParseError value that can be returned.
type Error: 'i;
/// Parse the prelude of a qualified rule. For style rules, this is a Selector list.
///
/// Return the representation of the prelude,
/// or `Err(_)` to ignore the entire qualified rule as invalid.
///
/// The prelude is the part before the `{ /* ... */ }` block.
///
/// The given `input` is a "delimited" parser
/// that ends where the prelude should end (before the next `{`).
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let _ = input;
Err(ParseError::Basic(BasicParseError::QualifiedRuleInvalid))
}
/// Parse the content of a `{ /* ... */ }` block for the body of the qualified rule.
///
/// Return the finished representation of the qualified rule
/// as returned by `RuleListParser::next`,
/// or `Err(_)` to ignore the entire qualified rule as invalid.
fn parse_block<'t>(&mut self, prelude: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let _ = prelude;
let _ = input;
Err(ParseError::Basic(BasicParseError::QualifiedRuleInvalid))
}
}
/// Provides an iterator for declaration list parsing.
pub struct DeclarationListParser<'i: 't, 't: 'a, 'a, P> {
/// The input given to `DeclarationListParser::new`
pub input: &'a mut Parser<'i, 't>,
/// The parser given to `DeclarationListParser::new`
pub parser: P,
}
impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> DeclarationListParser<'i, 't, 'a, P>
where P: DeclarationParser<'i, Declaration = I, Error = E> +
AtRuleParser<'i, AtRule = I, Error = E> {
/// Create a new `DeclarationListParser` for the given `input` and `parser`.
///
/// Note that all CSS declaration lists can in principle contain at-rules.
/// Even if no such valid at-rule exists (yet),
/// this affects error handling: at-rules end at `{}` blocks, not just semicolons.
///
/// The given `parser` therefore needs to implement
/// both `DeclarationParser` and `AtRuleParser` traits.
/// However, the latter can be an empty `impl`
/// since `AtRuleParser` provides default implementations of its methods.
///
/// The return type for finished declarations and at-rules also needs to be the same,
/// since `<DeclarationListParser as Iterator>::next` can return either.
/// It could be a custom enum.
pub fn new(input: &'a mut Parser<'i, 't>, parser: P) -> Self {
DeclarationListParser {
input: input,
parser: parser,
}
}
}
/// `DeclarationListParser` is an iterator that yields `Ok(_)` for a valid declaration or at-rule
/// or `Err(_)` for an invalid one.
impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> Iterator for DeclarationListParser<'i, 't, 'a, P>
where P: DeclarationParser<'i, Declaration = I, Error = E> +
AtRuleParser<'i, AtRule = I, Error = E> {
type Item = Result<I, PreciseParseError<'i, E>>;
fn next(&mut self) -> Option<Result<I, PreciseParseError<'i, E>>> {
loop {
let start = self.input.state();
// FIXME: remove intermediate variable when lifetimes are non-lexical
let ident = match self.input.next_including_whitespace_and_comments() {
Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) | Ok(&Token::Semicolon) => continue,
Ok(&Token::Ident(ref name)) => Ok(Ok(name.clone())),
Ok(&Token::AtKeyword(ref name)) => Ok(Err(name.clone())),
Ok(token) => Err(token.clone()),
Err(_) => return None,
};
match ident {
Ok(Ok(name)) => {
// Ident
return Some({
let parser = &mut self.parser;
// FIXME: https://github.com/rust-lang/rust/issues/42508
parse_until_after::<'i, 't, _, _, _>(self.input, Delimiter::Semicolon, |input| {
input.expect_colon()?;
parser.parse_value(name, input)
})
}.map_err(|e| PreciseParseError {
error: e,
slice: self.input.slice_from(start.position()),
location: start.source_location(),
}))
}
Ok(Err(name)) => {
// At-keyword
return Some(parse_at_rule(&start, name, self.input, &mut self.parser))
}
Err(token) => {
return Some(self.input.parse_until_after(Delimiter::Semicolon,
|_| Err(ParseError::Basic(BasicParseError::UnexpectedToken(token.clone()))))
.map_err(|e| PreciseParseError {
error: e,
slice: self.input.slice_from(start.position()),
location: start.source_location(),
}))
}
}
}
}
}
/// Provides an iterator for rule list parsing.
pub struct RuleListParser<'i: 't, 't: 'a, 'a, P> {
/// The input given to `RuleListParser::new`
pub input: &'a mut Parser<'i, 't>,
/// The parser given to `RuleListParser::new`
pub parser: P,
is_stylesheet: bool,
any_rule_so_far: bool,
}
impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> RuleListParser<'i, 't, 'a, P>
where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
AtRuleParser<'i, AtRule = R, Error = E> {
/// Create a new `RuleListParser` for the given `input` at the top-level of a stylesheet
/// and the given `parser`.
///
/// The given `parser` needs to implement both `QualifiedRuleParser` and `AtRuleParser` traits.
/// However, either of them can be an empty `impl`
/// since the traits provide default implementations of their methods.
///
/// The return type for finished qualified rules and at-rules also needs to be the same,
/// since `<RuleListParser as Iterator>::next` can return either.
/// It could be a custom enum.
pub fn new_for_stylesheet(input: &'a mut Parser<'i, 't>, parser: P) -> Self {
RuleListParser {
input: input,
parser: parser,
is_stylesheet: true,
any_rule_so_far: false,
}
}
/// Same as `new_for_stylesheet`, but should be used for rule lists inside a block
/// such as the body of an `@media` rule.
///
/// This differs in that `<!--` and `-->` tokens
/// should only be ignored at the stylesheet top-level.
/// (This is to deal with legacy workarounds for `<style>` HTML element parsing.)
pub fn new_for_nested_rule(input: &'a mut Parser<'i, 't>, parser: P) -> Self {
RuleListParser {
input: input,
parser: parser,
is_stylesheet: false,
any_rule_so_far: false,
}
}
}
/// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or `Err(_)` for an invalid one.
impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> Iterator for RuleListParser<'i, 't, 'a, P>
where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
AtRuleParser<'i, AtRule = R, Error = E> {
type Item = Result<R, PreciseParseError<'i, E>>;
fn next(&mut self) -> Option<Result<R, PreciseParseError<'i, E>>> {
loop {
let start = self.input.state();
// FIXME: remove intermediate variable when lifetimes are non-lexical
let at_keyword = match self.input.next_including_whitespace_and_comments() {
Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) => continue,
Ok(&Token::CDO) | Ok(&Token::CDC) if self.is_stylesheet => continue,
Ok(&Token::AtKeyword(ref name)) => Some(name.clone()),
Ok(_) => None,
Err(_) => return None,
};
if let Some(name) = at_keyword {
let first_stylesheet_rule = self.is_stylesheet && !self.any_rule_so_far;
self.any_rule_so_far = true;
if first_stylesheet_rule && name.eq_ignore_ascii_case("charset") {
let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock;
let _: Result<(), ParseError<()>> = self.input.parse_until_after(delimiters, |_| Ok(()));
} else {
return Some(parse_at_rule(&start, name.clone(), self.input, &mut self.parser))
}
} else {
self.any_rule_so_far = true;
self.input.reset(&start);
return Some(parse_qualified_rule(self.input, &mut self.parser)
.map_err(|e| PreciseParseError {
error: e,
slice: self.input.slice_from(start.position()),
location: start.source_location(),
}))
}
}
}
}
/// Parse a single declaration, such as one inside a `( /* ... */ )` parenthesis in an `@supports` prelude.
pub fn parse_one_declaration<'i, 't, P, E>(input: &mut Parser<'i, 't>, parser: &mut P)
-> Result<<P as DeclarationParser<'i>>::Declaration,
PreciseParseError<'i, E>>
where P: DeclarationParser<'i, Error = E> {
let start_position = input.position();
let start_location = input.current_source_location();
input.parse_entirely(|input| {
let name = input.expect_ident()?.clone();
input.expect_colon()?;
parser.parse_value(name, input)
}).map_err(|e| PreciseParseError {
error: e,
slice: input.slice_from(start_position),
location: start_location,
})
}
/// Parse a single rule, such as for CSSOM's `CSSStyleSheet.insertRule`.
pub fn parse_one_rule<'i, 't, R, P, E>(input: &mut Parser<'i, 't>, parser: &mut P)
-> Result<R, ParseError<'i, E>>
where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
AtRuleParser<'i, AtRule = R, Error = E> {
input.parse_entirely(|input| {
loop {
let start = input.state();
// FIXME: remove intermediate variable when lifetimes are non-lexical
let at_keyword = match *input.next_including_whitespace_and_comments()? {
Token::WhiteSpace(_) | Token::Comment(_) => continue,
Token::AtKeyword(ref name) => Some(name.clone()),
_ => None
};
if let Some(name) = at_keyword {
return parse_at_rule(&start, name, input, parser).map_err(|e| e.error)
} else {
input.reset(&start);
return parse_qualified_rule(input, parser)
}
}
})
}
/// A parse error with details of where it occurred
pub struct PreciseParseError<'i, E: 'i> {
/// Error details
pub error: ParseError<'i, E>,
/// The relevant slice of the input.
pub slice: &'i str,
/// The line number and column number of the start of the relevant input slice.
pub location: SourceLocation,
}
fn parse_at_rule<'i: 't, 't, P, E>(start: &ParserState, name: CowRcStr<'i>,
input: &mut Parser<'i, 't>, parser: &mut P)
-> Result<<P as AtRuleParser<'i>>::AtRule, PreciseParseError<'i, E>>
where P: AtRuleParser<'i, Error = E> {
let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock;
// FIXME: https://github.com/rust-lang/rust/issues/42508
let result = parse_until_before::<'i, 't, _, _, _>(input, delimiters, |input| {
parser.parse_prelude(name, input)
});
match result {
Ok(AtRuleType::WithoutBlock(rule)) => {
match input.next() {
Ok(&Token::Semicolon) | Err(_) => Ok(rule),
Ok(&Token::CurlyBracketBlock) => Err(PreciseParseError {
error: ParseError::Basic(BasicParseError::UnexpectedToken(Token::CurlyBracketBlock)),
slice: input.slice_from(start.position()),
location: start.source_location(),
}),
Ok(_) => unreachable!()
}
}
Ok(AtRuleType::WithBlock(prelude)) => {
match input.next() {
Ok(&Token::CurlyBracketBlock) => {
// FIXME: https://github.com/rust-lang/rust/issues/42508
parse_nested_block::<'i, 't, _, _, _>(input, move |input| parser.parse_block(prelude, input))
.map_err(|e| PreciseParseError {
error: e,
slice: input.slice_from(start.position()),
location: start.source_location(),
})
}
Ok(&Token::Semicolon) => Err(PreciseParseError {
error: ParseError::Basic(BasicParseError::UnexpectedToken(Token::Semicolon)),
slice: input.slice_from(start.position()),
location: start.source_location(),
}),
Err(e) => Err(PreciseParseError {
error: ParseError::Basic(e),
slice: input.slice_from(start.position()),
location: start.source_location(),
}),
Ok(_) => unreachable!()
}
}
Ok(AtRuleType::OptionalBlock(prelude)) => {
match input.next() {
Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude)),
Ok(&Token::CurlyBracketBlock) => {
// FIXME: https://github.com/rust-lang/rust/issues/42508
parse_nested_block::<'i, 't, _, _, _>(input, move |input| parser.parse_block(prelude, input))
.map_err(|e| PreciseParseError {
error: e,
slice: input.slice_from(start.position()),
location: start.source_location(),
})
}
_ => unreachable!()
}
}
Err(error) => {
let end_position = input.position();
match input.next() {
Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {},
_ => unreachable!()
};
Err(PreciseParseError {
error: error,
slice: input.slice(start.position()..end_position),
location: start.source_location(),
})
}
}
}
fn parse_qualified_rule<'i, 't, P, E>(input: &mut Parser<'i, 't>, parser: &mut P)
-> Result<<P as QualifiedRuleParser<'i>>::QualifiedRule, ParseError<'i, E>>
where P: QualifiedRuleParser<'i, Error = E> {
// FIXME: https://github.com/rust-lang/rust/issues/42508
let prelude = parse_until_before::<'i, 't, _, _, _>(input, Delimiter::CurlyBracketBlock, |input| {
parser.parse_prelude(input)
});
match *input.next()? {
Token::CurlyBracketBlock => {
// Do this here so that we consume the `{` even if the prelude is `Err`.
let prelude = prelude?;
// FIXME: https://github.com/rust-lang/rust/issues/42508
parse_nested_block::<'i, 't, _, _, _>(input, move |input| parser.parse_block(prelude, input))
}
_ => unreachable!()
}
}

409
third_party/rust/cssparser-0.19.0/src/serializer.rs vendored Normal file

@@ -0,0 +1,409 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::AsciiExt;
use std::fmt::{self, Write};
use super::Token;
/// Trait for things that can serialize themselves in CSS syntax.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
/// Serialize `self` in CSS syntax and return a result compatible with `std::fmt::Display`.
///
/// Typical usage is, for a `Foo` that implements `ToCss`:
///
/// ```{rust,ignore}
/// use std::fmt;
/// impl fmt::Display for Foo {
/// #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.fmt_to_css(f) }
/// }
/// ```
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn fmt_to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.to_css(dest).map_err(|_| fmt::Error)
}
}
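// A minimal usage sketch (not part of the vendored source): implementing
// `to_css` for a hypothetical `Px` type is enough to get the provided
// `to_css_string` for free.
#[cfg(test)]
mod to_css_usage_sketch {
    use super::ToCss;
    use std::fmt;

    struct Px(f32);

    impl ToCss for Px {
        // Write the value followed by its unit.
        fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
            write!(dest, "{}px", self.0)
        }
    }

    #[test]
    fn px_round_trips_through_to_css_string() {
        assert_eq!(Px(1.5).to_css_string(), "1.5px");
    }
}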
#[inline]
fn write_numeric<W>(value: f32, int_value: Option<i32>, has_sign: bool, dest: &mut W)
-> fmt::Result where W: fmt::Write {
    // A plain `value >= 0.` comparison would also be true for negative zero, hence `is_sign_positive()`.
if has_sign && value.is_sign_positive() {
dest.write_str("+")?;
}
if value == 0.0 && value.is_sign_negative() {
// Negative zero. Work around #20596.
dest.write_str("-0")?
} else {
write!(dest, "{}", value)?
}
if int_value.is_none() && value.fract() == 0. {
dest.write_str(".0")?;
}
Ok(())
}
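// Worked example (illustration): `write_numeric(1.0, None, false, dest)`
// first writes "1" via `{}` formatting, then appends ".0" because
// `int_value` is `None` and `1.0f32.fract() == 0.`, so an integral float
// serializes as "1.0" and cannot be re-parsed as an integer token.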
impl<'a> ToCss for Token<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Token::Ident(ref value) => serialize_identifier(&**value, dest)?,
Token::AtKeyword(ref value) => {
dest.write_str("@")?;
serialize_identifier(&**value, dest)?;
},
Token::Hash(ref value) => {
dest.write_str("#")?;
serialize_name(value, dest)?;
},
Token::IDHash(ref value) => {
dest.write_str("#")?;
serialize_identifier(&**value, dest)?;
}
Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
Token::UnquotedUrl(ref value) => {
dest.write_str("url(")?;
serialize_unquoted_url(&**value, dest)?;
dest.write_str(")")?;
},
Token::Delim(value) => write!(dest, "{}", value)?,
Token::Number { value, int_value, has_sign } => {
write_numeric(value, int_value, has_sign, dest)?
}
Token::Percentage { unit_value, int_value, has_sign } => {
write_numeric(unit_value * 100., int_value, has_sign, dest)?;
dest.write_str("%")?;
},
Token::Dimension { value, int_value, has_sign, ref unit } => {
write_numeric(value, int_value, has_sign, dest)?;
// Disambiguate with scientific notation.
let unit = &**unit;
if unit == "e" || unit == "E" || unit.starts_with("e-") || unit.starts_with("E-") {
dest.write_str("\\65 ")?;
serialize_name(&unit[1..], dest)?;
} else {
serialize_identifier(unit, dest)?;
}
},
Token::WhiteSpace(content) => dest.write_str(content)?,
Token::Comment(content) => write!(dest, "/*{}*/", content)?,
Token::Colon => dest.write_str(":")?,
Token::Semicolon => dest.write_str(";")?,
Token::Comma => dest.write_str(",")?,
Token::IncludeMatch => dest.write_str("~=")?,
Token::DashMatch => dest.write_str("|=")?,
Token::PrefixMatch => dest.write_str("^=")?,
Token::SuffixMatch => dest.write_str("$=")?,
Token::SubstringMatch => dest.write_str("*=")?,
Token::Column => dest.write_str("||")?,
Token::CDO => dest.write_str("<!--")?,
Token::CDC => dest.write_str("-->")?,
Token::Function(ref name) => {
serialize_identifier(&**name, dest)?;
dest.write_str("(")?;
},
Token::ParenthesisBlock => dest.write_str("(")?,
Token::SquareBracketBlock => dest.write_str("[")?,
Token::CurlyBracketBlock => dest.write_str("{")?,
Token::BadUrl(_) => dest.write_str("url(<bad url>)")?,
Token::BadString(_) => dest.write_str("\"<bad string>\n")?,
Token::CloseParenthesis => dest.write_str(")")?,
Token::CloseSquareBracket => dest.write_str("]")?,
Token::CloseCurlyBracket => dest.write_str("}")?,
}
Ok(())
}
}
/// Write a CSS identifier, escaping characters as necessary.
pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
if value.is_empty() {
return Ok(())
}
if value.starts_with("--") {
dest.write_str("--")?;
serialize_name(&value[2..], dest)
} else if value == "-" {
dest.write_str("\\-")
} else {
if value.as_bytes()[0] == b'-' {
dest.write_str("-")?;
value = &value[1..];
}
if let digit @ b'0'...b'9' = value.as_bytes()[0] {
write!(dest, "\\3{} ", digit as char)?;
value = &value[1..];
}
serialize_name(value, dest)
}
}
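// Illustration: a leading digit is written as a hex escape, so
// `serialize_identifier("0a", dest)` yields "\30 a" and
// `serialize_identifier("-0a", dest)` yields "-\30 a" (compare the
// identifier_serialization cases in tests.rs).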
fn serialize_name<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
let mut chunk_start = 0;
for (i, b) in value.bytes().enumerate() {
let escaped = match b {
b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' | b'_' | b'-' => continue,
_ if !b.is_ascii() => continue,
b'\0' => Some("\u{FFFD}"),
_ => None,
};
dest.write_str(&value[chunk_start..i])?;
if let Some(escaped) = escaped {
dest.write_str(escaped)?;
} else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
write!(dest, "\\{:x} ", b)?;
} else {
write!(dest, "\\{}", b as char)?;
}
chunk_start = i + 1;
}
dest.write_str(&value[chunk_start..])
}
fn serialize_unquoted_url<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
let mut chunk_start = 0;
for (i, b) in value.bytes().enumerate() {
let hex = match b {
b'\0' ... b' ' | b'\x7F' => true,
b'(' | b')' | b'"' | b'\'' | b'\\' => false,
_ => continue
};
dest.write_str(&value[chunk_start..i])?;
if hex {
write!(dest, "\\{:X} ", b)?;
} else {
write!(dest, "\\{}", b as char)?;
}
chunk_start = i + 1;
}
dest.write_str(&value[chunk_start..])
}
/// Write a double-quoted CSS string token, escaping content as necessary.
pub fn serialize_string<W>(value: &str, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str("\"")?;
CssStringWriter::new(dest).write_str(value)?;
dest.write_str("\"")?;
Ok(())
}
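// Illustration: `serialize_string("a\"b", dest)` yields `"a\"b"`; the
// writer below escapes quotes and backslashes with a backslash, and
// control characters as space-terminated hex escapes.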
/// A `fmt::Write` adapter that escapes text for writing as a double-quoted CSS string.
/// Quotes are not included.
///
/// Typical usage:
///
/// ```{rust,ignore}
/// fn write_foo<W>(foo: &Foo, dest: &mut W) -> fmt::Result where W: fmt::Write {
/// try!(dest.write_str("\""));
/// {
/// let mut string_dest = CssStringWriter::new(dest);
/// // Write into string_dest...
/// }
/// try!(dest.write_str("\""));
/// Ok(())
/// }
/// ```
pub struct CssStringWriter<'a, W: 'a> {
inner: &'a mut W,
}
impl<'a, W> CssStringWriter<'a, W> where W: fmt::Write {
/// Wrap a text writer to create a `CssStringWriter`.
pub fn new(inner: &'a mut W) -> CssStringWriter<'a, W> {
CssStringWriter { inner: inner }
}
}
impl<'a, W> fmt::Write for CssStringWriter<'a, W> where W: fmt::Write {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut chunk_start = 0;
for (i, b) in s.bytes().enumerate() {
let escaped = match b {
b'"' => Some("\\\""),
b'\\' => Some("\\\\"),
b'\0' => Some("\u{FFFD}"),
b'\x01'...b'\x1F' | b'\x7F' => None,
_ => continue,
};
self.inner.write_str(&s[chunk_start..i])?;
match escaped {
Some(x) => self.inner.write_str(x)?,
None => write!(self.inner, "\\{:x} ", b)?,
};
chunk_start = i + 1;
}
self.inner.write_str(&s[chunk_start..])
}
}
macro_rules! impl_tocss_for_number {
($T: ty) => {
impl<'a> ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}", *self)
}
}
}
}
impl_tocss_for_number!(f32);
impl_tocss_for_number!(f64);
impl_tocss_for_number!(i8);
impl_tocss_for_number!(u8);
impl_tocss_for_number!(i16);
impl_tocss_for_number!(u16);
impl_tocss_for_number!(i32);
impl_tocss_for_number!(u32);
impl_tocss_for_number!(i64);
impl_tocss_for_number!(u64);
/// A category of token. See the `needs_separator_when_before` method.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct TokenSerializationType(TokenSerializationTypeVariants);
#[cfg(feature = "heapsize")]
known_heap_size!(0, TokenSerializationType);
impl TokenSerializationType {
/// Return a value that represents the absence of a token, e.g. before the start of the input.
pub fn nothing() -> TokenSerializationType {
TokenSerializationType(TokenSerializationTypeVariants::Nothing)
}
/// If this value is `TokenSerializationType::nothing()`, set it to the given value instead.
pub fn set_if_nothing(&mut self, new_value: TokenSerializationType) {
if self.0 == TokenSerializationTypeVariants::Nothing {
self.0 = new_value.0
}
}
/// Return true if, when a token of category `self` is serialized just before
/// a token of category `other` with no whitespace in between,
/// an empty comment `/**/` needs to be inserted between them
/// so that they are not re-parsed as a single token.
///
/// See https://drafts.csswg.org/css-syntax/#serialization
pub fn needs_separator_when_before(self, other: TokenSerializationType) -> bool {
use self::TokenSerializationTypeVariants::*;
match self.0 {
Ident => matches!(other.0,
Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension |
CDC | OpenParen),
AtKeywordOrHash | Dimension => matches!(other.0,
Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension |
CDC),
DelimHash | DelimMinus | Number => matches!(other.0,
Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension),
DelimAt => matches!(other.0,
Ident | Function | UrlOrBadUrl | DelimMinus),
DelimDotOrPlus => matches!(other.0, Number | Percentage | Dimension),
DelimAssorted | DelimAsterisk => matches!(other.0, DelimEquals),
DelimBar => matches!(other.0, DelimEquals | DelimBar | DashMatch),
DelimSlash => matches!(other.0, DelimAsterisk | SubstringMatch),
Nothing | WhiteSpace | Percentage | UrlOrBadUrl | Function | CDC | OpenParen |
DashMatch | SubstringMatch | DelimQuestion | DelimEquals | Other => false,
}
}
}
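// Sketch, using the definitions above: the ident `a` serialized directly
// before the number `1` would re-tokenize as the single ident `a1`, so a
// `/**/` separator is required:
//
//     let a = Token::Ident("a".into()).serialization_type();
//     let one = Token::Number { value: 1., int_value: Some(1), has_sign: false }
//         .serialization_type();
//     assert!(a.needs_separator_when_before(one));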
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum TokenSerializationTypeVariants {
Nothing,
WhiteSpace,
AtKeywordOrHash,
Number,
Dimension,
Percentage,
UrlOrBadUrl,
Function,
Ident,
CDC,
DashMatch,
SubstringMatch,
OpenParen, // '('
DelimHash, // '#'
DelimAt, // '@'
DelimDotOrPlus, // '.', '+'
DelimMinus, // '-'
DelimQuestion, // '?'
DelimAssorted, // '$', '^', '~'
DelimEquals, // '='
DelimBar, // '|'
DelimSlash, // '/'
DelimAsterisk, // '*'
Other, // anything else
}
impl<'a> Token<'a> {
/// Categorize a token into a type that determines when `/**/` needs to be inserted
/// between two tokens when serialized next to each other without whitespace in between.
///
/// See the `TokenSerializationType::needs_separator_when_before` method.
pub fn serialization_type(&self) -> TokenSerializationType {
use self::TokenSerializationTypeVariants::*;
TokenSerializationType(match *self {
Token::Ident(_) => Ident,
Token::AtKeyword(_) | Token::Hash(_) | Token::IDHash(_) => AtKeywordOrHash,
Token::UnquotedUrl(_) | Token::BadUrl(_) => UrlOrBadUrl,
Token::Delim('#') => DelimHash,
Token::Delim('@') => DelimAt,
Token::Delim('.') | Token::Delim('+') => DelimDotOrPlus,
Token::Delim('-') => DelimMinus,
Token::Delim('?') => DelimQuestion,
Token::Delim('$') | Token::Delim('^') | Token::Delim('~') => DelimAssorted,
Token::Delim('=') => DelimEquals,
Token::Delim('|') => DelimBar,
Token::Delim('/') => DelimSlash,
Token::Delim('*') => DelimAsterisk,
Token::Number { .. } => Number,
Token::Percentage { .. } => Percentage,
Token::Dimension { .. } => Dimension,
Token::WhiteSpace(_) => WhiteSpace,
Token::Comment(_) => DelimSlash,
Token::DashMatch => DashMatch,
Token::SubstringMatch => SubstringMatch,
Token::Column => DelimBar,
Token::CDC => CDC,
Token::Function(_) => Function,
Token::ParenthesisBlock => OpenParen,
Token::SquareBracketBlock | Token::CurlyBracketBlock |
Token::CloseParenthesis | Token::CloseSquareBracket | Token::CloseCurlyBracket |
Token::QuotedString(_) | Token::BadString(_) |
Token::Delim(_) | Token::Colon | Token::Semicolon | Token::Comma | Token::CDO |
Token::IncludeMatch | Token::PrefixMatch | Token::SuffixMatch
=> Other,
})
}
}

47
third_party/rust/cssparser-0.19.0/src/size_of_tests.rs vendored Normal file

@@ -0,0 +1,47 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cow_rc_str::CowRcStr;
use std::borrow::Cow;
use tokenizer::Token;
#[macro_export]
macro_rules! size_of_test {
($testname: ident, $t: ty, $expected_size: expr) => {
#[test]
fn $testname() {
let new = ::std::mem::size_of::<$t>();
let old = $expected_size;
if new < old {
panic!(
"Your changes have decreased the stack size of {} from {} to {}. \
Good work! Please update the expected size in {}.",
stringify!($t), old, new, file!()
)
} else if new > old {
panic!(
"Your changes have increased the stack size of {} from {} to {}. \
Please consider choosing a design which avoids this increase. \
If you feel that the increase is necessary, update the size in {}.",
stringify!($t), old, new, file!()
)
}
}
}
}
// Some of these assume 64-bit
size_of_test!(token, Token, 32);
size_of_test!(std_cow_str, Cow<'static, str>, 32);
size_of_test!(cow_rc_str, CowRcStr, 16);
size_of_test!(tokenizer, ::tokenizer::Tokenizer, 40);
size_of_test!(parser_input, ::parser::ParserInput, 112);
size_of_test!(parser, ::parser::Parser, 16);
size_of_test!(source_position, ::SourcePosition, 8);
size_of_test!(parser_state, ::ParserState, 24);
size_of_test!(basic_parse_error, ::BasicParseError, 40);
size_of_test!(parse_error_lower_bound, ::ParseError<()>, 48);
size_of_test!(precise_parse_error_lower_bound, ::PreciseParseError<()>, 72);

981
third_party/rust/cssparser-0.19.0/src/tests.rs vendored Normal file

@@ -0,0 +1,981 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(feature = "bench")]
extern crate test;
use encoding_rs;
use rustc_serialize::json::{self, Json, ToJson};
#[cfg(feature = "bench")]
use self::test::Bencher;
use super::{Parser, Delimiter, Token, SourceLocation, ParseError,
DeclarationListParser, DeclarationParser, RuleListParser, BasicParseError,
AtRuleType, AtRuleParser, QualifiedRuleParser, ParserInput,
parse_one_declaration, parse_one_rule, parse_important,
stylesheet_encoding, EncodingSupport,
TokenSerializationType, CowRcStr,
Color, RGBA, parse_nth, UnicodeRange, ToCss};
macro_rules! JArray {
($($e: expr,)*) => { JArray![ $( $e ),* ] };
($($e: expr),*) => { Json::Array(vec!( $( $e.to_json() ),* )) }
}
fn almost_equals(a: &Json, b: &Json) -> bool {
match (a, b) {
(&Json::I64(a), _) => almost_equals(&Json::F64(a as f64), b),
(&Json::U64(a), _) => almost_equals(&Json::F64(a as f64), b),
(_, &Json::I64(b)) => almost_equals(a, &Json::F64(b as f64)),
(_, &Json::U64(b)) => almost_equals(a, &Json::F64(b as f64)),
(&Json::F64(a), &Json::F64(b)) => (a - b).abs() < 1e-6,
(&Json::Boolean(a), &Json::Boolean(b)) => a == b,
(&Json::String(ref a), &Json::String(ref b)) => a == b,
(&Json::Array(ref a), &Json::Array(ref b)) => {
a.len() == b.len() &&
a.iter().zip(b.iter()).all(|(ref a, ref b)| almost_equals(*a, *b))
},
(&Json::Object(_), &Json::Object(_)) => panic!("Not implemented"),
(&Json::Null, &Json::Null) => true,
_ => false,
}
}
fn normalize(json: &mut Json) {
match *json {
Json::Array(ref mut list) => {
for item in list.iter_mut() {
normalize(item)
}
}
Json::String(ref mut s) => {
if *s == "extra-input" || *s == "empty" {
*s = "invalid".to_string()
}
}
_ => {}
}
}
fn assert_json_eq(results: json::Json, mut expected: json::Json, message: &str) {
normalize(&mut expected);
if !almost_equals(&results, &expected) {
println!("{}", ::difference::Changeset::new(
&results.pretty().to_string(),
&expected.pretty().to_string(),
"\n",
));
panic!("{}", message)
}
}
fn run_raw_json_tests<F: Fn(Json, Json) -> ()>(json_data: &str, run: F) {
let items = match Json::from_str(json_data) {
Ok(Json::Array(items)) => items,
_ => panic!("Invalid JSON")
};
assert!(items.len() % 2 == 0);
let mut input = None;
for item in items.into_iter() {
match (&input, item) {
(&None, json_obj) => input = Some(json_obj),
(&Some(_), expected) => {
let input = input.take().unwrap();
run(input, expected)
},
};
}
}
fn run_json_tests<F: Fn(&mut Parser) -> Json>(json_data: &str, parse: F) {
run_raw_json_tests(json_data, |input, expected| {
match input {
Json::String(input) => {
let mut parse_input = ParserInput::new(&input);
let result = parse(&mut Parser::new(&mut parse_input));
assert_json_eq(result, expected, &input);
},
_ => panic!("Unexpected JSON")
}
});
}
#[test]
fn component_value_list() {
run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| {
Json::Array(component_values_to_json(input))
});
}
#[test]
fn one_component_value() {
run_json_tests(include_str!("css-parsing-tests/one_component_value.json"), |input| {
let result: Result<Json, ParseError<()>> = input.parse_entirely(|input| {
Ok(one_component_value_to_json(input.next()?.clone(), input))
});
result.unwrap_or(JArray!["error", "invalid"])
});
}
#[test]
fn declaration_list() {
run_json_tests(include_str!("css-parsing-tests/declaration_list.json"), |input| {
Json::Array(DeclarationListParser::new(input, JsonParser).map(|result| {
result.unwrap_or(JArray!["error", "invalid"])
}).collect())
});
}
#[test]
fn one_declaration() {
run_json_tests(include_str!("css-parsing-tests/one_declaration.json"), |input| {
parse_one_declaration(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"])
});
}
#[test]
fn rule_list() {
run_json_tests(include_str!("css-parsing-tests/rule_list.json"), |input| {
Json::Array(RuleListParser::new_for_nested_rule(input, JsonParser).map(|result| {
result.unwrap_or(JArray!["error", "invalid"])
}).collect())
});
}
#[test]
fn stylesheet() {
run_json_tests(include_str!("css-parsing-tests/stylesheet.json"), |input| {
Json::Array(RuleListParser::new_for_stylesheet(input, JsonParser).map(|result| {
result.unwrap_or(JArray!["error", "invalid"])
}).collect())
});
}
#[test]
fn one_rule() {
run_json_tests(include_str!("css-parsing-tests/one_rule.json"), |input| {
parse_one_rule(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"])
});
}
#[test]
fn stylesheet_from_bytes() {
pub struct EncodingRs;
impl EncodingSupport for EncodingRs {
type Encoding = &'static encoding_rs::Encoding;
fn utf8() -> Self::Encoding {
encoding_rs::UTF_8
}
fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool {
*encoding == encoding_rs::UTF_16LE ||
*encoding == encoding_rs::UTF_16BE
}
fn from_label(ascii_label: &[u8]) -> Option<Self::Encoding> {
encoding_rs::Encoding::for_label(ascii_label)
}
}
run_raw_json_tests(include_str!("css-parsing-tests/stylesheet_bytes.json"),
|input, expected| {
let map = match input {
Json::Object(map) => map,
_ => panic!("Unexpected JSON")
};
let result = {
let css = get_string(&map, "css_bytes").unwrap().chars().map(|c| {
assert!(c as u32 <= 0xFF);
c as u8
}).collect::<Vec<u8>>();
let protocol_encoding_label = get_string(&map, "protocol_encoding")
.map(|s| s.as_bytes());
let environment_encoding = get_string(&map, "environment_encoding")
.map(|s| s.as_bytes())
.and_then(EncodingRs::from_label);
let encoding = stylesheet_encoding::<EncodingRs>(
&css, protocol_encoding_label, environment_encoding);
let (css_unicode, used_encoding, _) = encoding.decode(&css);
let mut input = ParserInput::new(&css_unicode);
let input = &mut Parser::new(&mut input);
let rules = RuleListParser::new_for_stylesheet(input, JsonParser)
.map(|result| result.unwrap_or(JArray!["error", "invalid"]))
.collect::<Vec<_>>();
JArray![rules, used_encoding.name().to_lowercase()]
};
assert_json_eq(result, expected, &Json::Object(map).to_string());
});
fn get_string<'a>(map: &'a json::Object, key: &str) -> Option<&'a str> {
match map.get(key) {
Some(&Json::String(ref s)) => Some(s),
Some(&Json::Null) => None,
None => None,
_ => panic!("Unexpected JSON"),
}
}
}
#[test]
fn expect_no_error_token() {
let mut input = ParserInput::new("foo 4px ( / { !bar }");
assert!(Parser::new(&mut input).expect_no_error_token().is_ok());
let mut input = ParserInput::new(")");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("}");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("(a){]");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("'\n'");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("url('\n'");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("url(a b)");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
let mut input = ParserInput::new("url(\u{7F}))");
assert!(Parser::new(&mut input).expect_no_error_token().is_err());
}
/// https://github.com/servo/rust-cssparser/issues/71
#[test]
fn outer_block_end_consumed() {
let mut input = ParserInput::new("(calc(true))");
let mut input = Parser::new(&mut input);
assert!(input.expect_parenthesis_block().is_ok());
assert!(input.parse_nested_block(|input| {
let result: Result<_, ParseError<()>> = input.expect_function_matching("calc")
.map_err(|e| ParseError::Basic(e));
result
}).is_ok());
println!("{:?}", input.position());
assert!(input.next().is_err());
}
/// https://github.com/servo/rust-cssparser/issues/174
#[test]
fn bad_url_slice_out_of_bounds() {
let mut input = ParserInput::new("url(\u{1}\\");
let mut parser = Parser::new(&mut input);
let result = parser.next_including_whitespace_and_comments(); // This used to panic
assert_eq!(result, Ok(&Token::BadUrl("\u{1}\\".into())));
}
/// https://bugzilla.mozilla.org/show_bug.cgi?id=1383975
#[test]
fn bad_url_slice_not_at_char_boundary() {
let mut input = ParserInput::new("url(9\n۰");
let mut parser = Parser::new(&mut input);
let result = parser.next_including_whitespace_and_comments(); // This used to panic
assert_eq!(result, Ok(&Token::BadUrl("9\n۰".into())));
}
#[test]
fn unquoted_url_escaping() {
let token = Token::UnquotedUrl("\
\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\
\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \
!\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\
^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\
".into());
let serialized = token.to_css_string();
assert_eq!(serialized, "\
url(\
\\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\A \\B \\C \\D \\E \\F \\10 \
\\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1A \\1B \\1C \\1D \\1E \\1F \\20 \
!\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\
^_`abcdefghijklmnopqrstuvwxyz{|}~\\7F é\
)\
");
let mut input = ParserInput::new(&serialized);
assert_eq!(Parser::new(&mut input).next(), Ok(&token));
}
#[test]
fn test_expect_url() {
fn parse<'a>(s: &mut ParserInput<'a>) -> Result<CowRcStr<'a>, BasicParseError<'a>> {
Parser::new(s).expect_url()
}
let mut input = ParserInput::new("url()");
assert_eq!(parse(&mut input).unwrap(), "");
let mut input = ParserInput::new("url( ");
assert_eq!(parse(&mut input).unwrap(), "");
let mut input = ParserInput::new("url( abc");
assert_eq!(parse(&mut input).unwrap(), "abc");
let mut input = ParserInput::new("url( abc \t)");
assert_eq!(parse(&mut input).unwrap(), "abc");
let mut input = ParserInput::new("url( 'abc' \t)");
assert_eq!(parse(&mut input).unwrap(), "abc");
let mut input = ParserInput::new("url(abc more stuff)");
assert!(parse(&mut input).is_err());
// The grammar at https://drafts.csswg.org/css-values/#urls plans for `<url-modifier>*`
// at the position of "more stuff", but no such modifier is defined yet.
let mut input = ParserInput::new("url('abc' more stuff)");
assert!(parse(&mut input).is_err());
}
fn run_color_tests<F: Fn(Result<Color, ()>) -> Json>(json_data: &str, to_json: F) {
run_json_tests(json_data, |input| {
let result: Result<_, ParseError<()>> = input.parse_entirely(|i| {
Color::parse(i).map_err(|e| ParseError::Basic(e))
});
to_json(result.map_err(|_| ()))
});
}
#[test]
fn color3() {
run_color_tests(include_str!("css-parsing-tests/color3.json"), |c| c.ok().to_json())
}
#[test]
fn color3_hsl() {
run_color_tests(include_str!("css-parsing-tests/color3_hsl.json"), |c| c.ok().to_json())
}
/// color3_keywords.json is different: R, G and B are in 0..255 rather than 0..1
#[test]
fn color3_keywords() {
run_color_tests(include_str!("css-parsing-tests/color3_keywords.json"), |c| c.ok().to_json())
}
#[test]
fn nth() {
run_json_tests(include_str!("css-parsing-tests/An+B.json"), |input| {
input.parse_entirely(|i| {
let result: Result<_, ParseError<()>> = parse_nth(i).map_err(|e| ParseError::Basic(e));
result
}).ok().to_json()
});
}
#[test]
fn unicode_range() {
run_json_tests(include_str!("css-parsing-tests/urange.json"), |input| {
let result: Result<_, ParseError<()>> = input.parse_comma_separated(|input| {
let result = UnicodeRange::parse(input).ok().map(|r| (r.start, r.end));
if input.is_exhausted() {
Ok(result)
} else {
while let Ok(_) = input.next() {}
Ok(None)
}
});
result.unwrap().to_json()
});
}
#[test]
fn serializer_not_preserving_comments() {
serializer(false)
}
#[test]
fn serializer_preserving_comments() {
serializer(true)
}
fn serializer(preserve_comments: bool) {
run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| {
fn write_to(mut previous_token: TokenSerializationType,
input: &mut Parser,
string: &mut String,
preserve_comments: bool) {
while let Ok(token) = if preserve_comments {
input.next_including_whitespace_and_comments().map(|t| t.clone())
} else {
input.next_including_whitespace().map(|t| t.clone())
} {
let token_type = token.serialization_type();
if !preserve_comments && previous_token.needs_separator_when_before(token_type) {
string.push_str("/**/")
}
previous_token = token_type;
token.to_css(string).unwrap();
let closing_token = match token {
Token::Function(_) | Token::ParenthesisBlock => Some(Token::CloseParenthesis),
Token::SquareBracketBlock => Some(Token::CloseSquareBracket),
Token::CurlyBracketBlock => Some(Token::CloseCurlyBracket),
_ => None
};
if let Some(closing_token) = closing_token {
let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| {
write_to(previous_token, input, string, preserve_comments);
Ok(())
});
result.unwrap();
closing_token.to_css(string).unwrap();
}
}
}
let mut serialized = String::new();
write_to(TokenSerializationType::nothing(), input, &mut serialized, preserve_comments);
let mut input = ParserInput::new(&serialized);
let parser = &mut Parser::new(&mut input);
Json::Array(component_values_to_json(parser))
});
}
#[test]
fn serialize_current_color() {
let c = Color::CurrentColor;
assert!(c.to_css_string() == "currentcolor");
}
#[test]
fn serialize_rgb_full_alpha() {
let c = Color::RGBA(RGBA::new(255, 230, 204, 255));
assert_eq!(c.to_css_string(), "rgb(255, 230, 204)");
}
#[test]
fn serialize_rgba() {
let c = Color::RGBA(RGBA::new(26, 51, 77, 32));
assert_eq!(c.to_css_string(), "rgba(26, 51, 77, 0.125)");
}
#[test]
fn serialize_rgba_two_digit_float_if_roundtrips() {
let c = Color::RGBA(RGBA::from_floats(0., 0., 0., 0.5));
assert_eq!(c.to_css_string(), "rgba(0, 0, 0, 0.5)");
}
#[test]
fn line_numbers() {
let mut input = ParserInput::new(concat!(
"fo\\30\r\n",
"0o bar/*\n",
"*/baz\r\n",
"\n",
"url(\r\n",
" u \r\n",
")\"a\\\r\n",
"b\""
));
let mut input = Parser::new(&mut input);
assert_eq!(input.current_source_location(), SourceLocation { line: 0, column: 0 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("fo00o".into())));
assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 2 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace(" ")));
assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 3 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("bar".into())));
assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 6 });
assert_eq!(input.next_including_whitespace_and_comments(), Ok(&Token::Comment("\n")));
assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 2 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("baz".into())));
assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 5 });
let state = input.state();
assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace("\r\n\n")));
assert_eq!(input.current_source_location(), SourceLocation { line: 4, column: 0 });
assert_eq!(state.source_location(), SourceLocation { line: 2, column: 5 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::UnquotedUrl("u".into())));
assert_eq!(input.current_source_location(), SourceLocation { line: 6, column: 1 });
assert_eq!(input.next_including_whitespace(), Ok(&Token::QuotedString("ab".into())));
assert_eq!(input.current_source_location(), SourceLocation { line: 7, column: 2 });
assert!(input.next_including_whitespace().is_err());
}
#[test]
fn overflow() {
use std::iter::repeat;
use std::f32;
let css = r"
2147483646
2147483647
2147483648
10000000000000
1000000000000000000000000000000000000000
1{309 zeros}
-2147483647
-2147483648
-2147483649
-10000000000000
-1000000000000000000000000000000000000000
-1{309 zeros}
3.30282347e+38
3.40282347e+38
3.402824e+38
-3.30282347e+38
-3.40282347e+38
-3.402824e+38
".replace("{309 zeros}", &repeat('0').take(309).collect::<String>());
let mut input = ParserInput::new(&css);
let mut input = Parser::new(&mut input);
assert_eq!(input.expect_integer(), Ok(2147483646));
assert_eq!(input.expect_integer(), Ok(2147483647));
assert_eq!(input.expect_integer(), Ok(2147483647)); // Clamp on overflow
assert_eq!(input.expect_integer(), Ok(2147483647));
assert_eq!(input.expect_integer(), Ok(2147483647));
assert_eq!(input.expect_integer(), Ok(2147483647));
assert_eq!(input.expect_integer(), Ok(-2147483647));
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_integer(), Ok(-2147483648)); // Clamp on overflow
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_number(), Ok(3.30282347e+38));
assert_eq!(input.expect_number(), Ok(f32::MAX));
assert_eq!(input.expect_number(), Ok(f32::INFINITY));
assert!(f32::MAX != f32::INFINITY);
assert_eq!(input.expect_number(), Ok(-3.30282347e+38));
assert_eq!(input.expect_number(), Ok(f32::MIN));
assert_eq!(input.expect_number(), Ok(f32::NEG_INFINITY));
assert!(f32::MIN != f32::NEG_INFINITY);
}
#[test]
fn line_delimited() {
let mut input = ParserInput::new(" { foo ; bar } baz;,");
let mut input = Parser::new(&mut input);
assert_eq!(input.next(), Ok(&Token::CurlyBracketBlock));
assert!({
let result: Result<_, ParseError<()>> = input.parse_until_after(Delimiter::Semicolon, |_| Ok(42));
result
}.is_err());
assert_eq!(input.next(), Ok(&Token::Comma));
assert!(input.next().is_err());
}
#[test]
fn identifier_serialization() {
// Null bytes
assert_eq!(Token::Ident("\0".into()).to_css_string(), "\u{FFFD}");
assert_eq!(Token::Ident("a\0".into()).to_css_string(), "a\u{FFFD}");
assert_eq!(Token::Ident("\0b".into()).to_css_string(), "\u{FFFD}b");
assert_eq!(Token::Ident("a\0b".into()).to_css_string(), "a\u{FFFD}b");
// Replacement character
assert_eq!(Token::Ident("\u{FFFD}".into()).to_css_string(), "\u{FFFD}");
assert_eq!(Token::Ident("a\u{FFFD}".into()).to_css_string(), "a\u{FFFD}");
assert_eq!(Token::Ident("\u{FFFD}b".into()).to_css_string(), "\u{FFFD}b");
assert_eq!(Token::Ident("a\u{FFFD}b".into()).to_css_string(), "a\u{FFFD}b");
// Number prefix
assert_eq!(Token::Ident("0a".into()).to_css_string(), "\\30 a");
assert_eq!(Token::Ident("1a".into()).to_css_string(), "\\31 a");
assert_eq!(Token::Ident("2a".into()).to_css_string(), "\\32 a");
assert_eq!(Token::Ident("3a".into()).to_css_string(), "\\33 a");
assert_eq!(Token::Ident("4a".into()).to_css_string(), "\\34 a");
assert_eq!(Token::Ident("5a".into()).to_css_string(), "\\35 a");
assert_eq!(Token::Ident("6a".into()).to_css_string(), "\\36 a");
assert_eq!(Token::Ident("7a".into()).to_css_string(), "\\37 a");
assert_eq!(Token::Ident("8a".into()).to_css_string(), "\\38 a");
assert_eq!(Token::Ident("9a".into()).to_css_string(), "\\39 a");
// Letter number prefix
assert_eq!(Token::Ident("a0b".into()).to_css_string(), "a0b");
assert_eq!(Token::Ident("a1b".into()).to_css_string(), "a1b");
assert_eq!(Token::Ident("a2b".into()).to_css_string(), "a2b");
assert_eq!(Token::Ident("a3b".into()).to_css_string(), "a3b");
assert_eq!(Token::Ident("a4b".into()).to_css_string(), "a4b");
assert_eq!(Token::Ident("a5b".into()).to_css_string(), "a5b");
assert_eq!(Token::Ident("a6b".into()).to_css_string(), "a6b");
assert_eq!(Token::Ident("a7b".into()).to_css_string(), "a7b");
assert_eq!(Token::Ident("a8b".into()).to_css_string(), "a8b");
assert_eq!(Token::Ident("a9b".into()).to_css_string(), "a9b");
// Dash number prefix
assert_eq!(Token::Ident("-0a".into()).to_css_string(), "-\\30 a");
assert_eq!(Token::Ident("-1a".into()).to_css_string(), "-\\31 a");
assert_eq!(Token::Ident("-2a".into()).to_css_string(), "-\\32 a");
assert_eq!(Token::Ident("-3a".into()).to_css_string(), "-\\33 a");
assert_eq!(Token::Ident("-4a".into()).to_css_string(), "-\\34 a");
assert_eq!(Token::Ident("-5a".into()).to_css_string(), "-\\35 a");
assert_eq!(Token::Ident("-6a".into()).to_css_string(), "-\\36 a");
assert_eq!(Token::Ident("-7a".into()).to_css_string(), "-\\37 a");
assert_eq!(Token::Ident("-8a".into()).to_css_string(), "-\\38 a");
assert_eq!(Token::Ident("-9a".into()).to_css_string(), "-\\39 a");
// Double dash prefix
assert_eq!(Token::Ident("--a".into()).to_css_string(), "--a");
// Various tests
assert_eq!(Token::Ident("\x01\x02\x1E\x1F".into()).to_css_string(), "\\1 \\2 \\1e \\1f ");
assert_eq!(Token::Ident("\u{0080}\x2D\x5F\u{00A9}".into()).to_css_string(), "\u{0080}\x2D\x5F\u{00A9}");
assert_eq!(Token::Ident("\x7F\u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\
\u{008A}\u{008B}\u{008C}\u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\
\u{0097}\u{0098}\u{0099}\u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}".into()).to_css_string(),
"\\7f \u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\u{008A}\u{008B}\u{008C}\
\u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\u{0097}\u{0098}\u{0099}\
\u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}");
assert_eq!(Token::Ident("\u{00A0}\u{00A1}\u{00A2}".into()).to_css_string(), "\u{00A0}\u{00A1}\u{00A2}");
assert_eq!(Token::Ident("a0123456789b".into()).to_css_string(), "a0123456789b");
assert_eq!(Token::Ident("abcdefghijklmnopqrstuvwxyz".into()).to_css_string(), "abcdefghijklmnopqrstuvwxyz");
assert_eq!(Token::Ident("ABCDEFGHIJKLMNOPQRSTUVWXYZ".into()).to_css_string(), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
assert_eq!(Token::Ident("\x20\x21\x78\x79".into()).to_css_string(), "\\ \\!xy");
// astral symbol (U+1D306 TETRAGRAM FOR CENTRE)
assert_eq!(Token::Ident("\u{1D306}".into()).to_css_string(), "\u{1D306}");
}
impl ToJson for Color {
fn to_json(&self) -> json::Json {
match *self {
Color::RGBA(ref rgba) => {
[rgba.red, rgba.green, rgba.blue, rgba.alpha].to_json()
},
Color::CurrentColor => "currentcolor".to_json(),
}
}
}
#[cfg(feature = "bench")]
const BACKGROUND_IMAGE: &'static str = include_str!("big-data-url.css");
#[cfg(feature = "bench")]
#[bench]
fn unquoted_url(b: &mut Bencher) {
b.iter(|| {
let mut input = ParserInput::new(BACKGROUND_IMAGE);
let mut input = Parser::new(&mut input);
input.look_for_var_functions();
let result = input.try(|input| input.expect_url());
assert!(result.is_ok());
input.seen_var_functions();
(result.is_ok(), input.seen_var_functions())
})
}
#[cfg(feature = "bench")]
#[bench]
fn numeric(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1000000 {
let mut input = ParserInput::new("10px");
let mut input = Parser::new(&mut input);
let _ = test::black_box(input.next());
}
})
}
struct JsonParser;
#[test]
fn no_stack_overflow_multiple_nested_blocks() {
let mut input: String = "{{".into();
for _ in 0..20 {
let dup = input.clone();
input.push_str(&dup);
}
let mut input = ParserInput::new(&input);
let mut input = Parser::new(&mut input);
while let Ok(..) = input.next() { }
}
impl<'i> DeclarationParser<'i> for JsonParser {
type Declaration = Json;
type Error = ();
fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<Json, ParseError<'i, ()>> {
let mut value = vec![];
let mut important = false;
loop {
let start = input.state();
if let Ok(mut token) = input.next_including_whitespace().map(|t| t.clone()) {
// Hack to deal with css-parsing-tests assuming that
// `!important` in the middle of a declaration value is OK.
// This can never happen per spec
// (even CSS Variables forbid top-level `!`)
if token == Token::Delim('!') {
input.reset(&start);
if parse_important(input).is_ok() {
if input.is_exhausted() {
important = true;
break
}
}
input.reset(&start);
token = input.next_including_whitespace().unwrap().clone();
}
value.push(one_component_value_to_json(token, input));
} else {
break
}
}
Ok(JArray![
"declaration",
name,
value,
important,
])
}
}
impl<'i> AtRuleParser<'i> for JsonParser {
type Prelude = Vec<Json>;
type AtRule = Json;
type Error = ();
fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<AtRuleType<Vec<Json>, Json>, ParseError<'i, ()>> {
Ok(AtRuleType::OptionalBlock(vec![
"at-rule".to_json(),
name.to_json(),
Json::Array(component_values_to_json(input)),
]))
}
fn parse_block<'t>(&mut self, mut prelude: Vec<Json>, input: &mut Parser<'i, 't>)
-> Result<Json, ParseError<'i, ()>> {
prelude.push(Json::Array(component_values_to_json(input)));
Ok(Json::Array(prelude))
}
fn rule_without_block(&mut self, mut prelude: Vec<Json>) -> Json {
prelude.push(Json::Null);
Json::Array(prelude)
}
}
impl<'i> QualifiedRuleParser<'i> for JsonParser {
type Prelude = Vec<Json>;
type QualifiedRule = Json;
type Error = ();
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) -> Result<Vec<Json>, ParseError<'i, ()>> {
Ok(component_values_to_json(input))
}
fn parse_block<'t>(&mut self, prelude: Vec<Json>, input: &mut Parser<'i, 't>)
-> Result<Json, ParseError<'i, ()>> {
Ok(JArray![
"qualified rule",
prelude,
component_values_to_json(input),
])
}
}
fn component_values_to_json(input: &mut Parser) -> Vec<Json> {
let mut values = vec![];
while let Ok(token) = input.next_including_whitespace().map(|t| t.clone()) {
values.push(one_component_value_to_json(token, input));
}
values
}
fn one_component_value_to_json(token: Token, input: &mut Parser) -> Json {
fn numeric(value: f32, int_value: Option<i32>, has_sign: bool) -> Vec<json::Json> {
vec![
Token::Number {
value: value,
int_value: int_value,
has_sign: has_sign,
}.to_css_string().to_json(),
match int_value { Some(i) => i.to_json(), None => value.to_json() },
match int_value { Some(_) => "integer", None => "number" }.to_json()
]
}
fn nested(input: &mut Parser) -> Vec<Json> {
let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| {
Ok(component_values_to_json(input))
});
result.unwrap()
}
match token {
Token::Ident(value) => JArray!["ident", value],
Token::AtKeyword(value) => JArray!["at-keyword", value],
Token::Hash(value) => JArray!["hash", value, "unrestricted"],
Token::IDHash(value) => JArray!["hash", value, "id"],
Token::QuotedString(value) => JArray!["string", value],
Token::UnquotedUrl(value) => JArray!["url", value],
Token::Delim('\\') => "\\".to_json(),
Token::Delim(value) => value.to_string().to_json(),
Token::Number { value, int_value, has_sign } => Json::Array({
let mut v = vec!["number".to_json()];
v.extend(numeric(value, int_value, has_sign));
v
}),
Token::Percentage { unit_value, int_value, has_sign } => Json::Array({
let mut v = vec!["percentage".to_json()];
v.extend(numeric(unit_value * 100., int_value, has_sign));
v
}),
Token::Dimension { value, int_value, has_sign, unit } => Json::Array({
let mut v = vec!["dimension".to_json()];
v.extend(numeric(value, int_value, has_sign));
v.push(unit.to_json());
v
}),
Token::WhiteSpace(_) => " ".to_json(),
Token::Comment(_) => "/**/".to_json(),
Token::Colon => ":".to_json(),
Token::Semicolon => ";".to_json(),
Token::Comma => ",".to_json(),
Token::IncludeMatch => "~=".to_json(),
Token::DashMatch => "|=".to_json(),
Token::PrefixMatch => "^=".to_json(),
Token::SuffixMatch => "$=".to_json(),
Token::SubstringMatch => "*=".to_json(),
Token::Column => "||".to_json(),
Token::CDO => "<!--".to_json(),
Token::CDC => "-->".to_json(),
Token::Function(name) => Json::Array({
let mut v = vec!["function".to_json(), name.to_json()];
v.extend(nested(input));
v
}),
Token::ParenthesisBlock => Json::Array({
let mut v = vec!["()".to_json()];
v.extend(nested(input));
v
}),
Token::SquareBracketBlock => Json::Array({
let mut v = vec!["[]".to_json()];
v.extend(nested(input));
v
}),
Token::CurlyBracketBlock => Json::Array({
let mut v = vec!["{}".to_json()];
v.extend(nested(input));
v
}),
Token::BadUrl(_) => JArray!["error", "bad-url"],
Token::BadString(_) => JArray!["error", "bad-string"],
Token::CloseParenthesis => JArray!["error", ")"],
Token::CloseSquareBracket => JArray!["error", "]"],
Token::CloseCurlyBracket => JArray!["error", "}"],
}
}
/// A previous version of procedural-masquerade had a bug where it
/// would normalize consecutive whitespace to a single space,
/// including in string literals.
#[test]
fn procedural_masquerade_whitespace() {
ascii_case_insensitive_phf_map! {
map -> () = {
" \t\n" => ()
}
}
assert_eq!(map(" \t\n"), Some(&()));
assert_eq!(map(" "), None);
match_ignore_ascii_case! { " \t\n",
" " => panic!("1"),
" \t\n" => {},
_ => panic!("2"),
}
match_ignore_ascii_case! { " ",
" \t\n" => panic!("3"),
" " => {},
_ => panic!("4"),
}
}
#[test]
fn parse_until_before_stops_at_delimiter_or_end_of_input() {
// For all j and k, inputs[i].1[j] should parse the same as inputs[i].1[k]
// when we use delimiters inputs[i].0.
let inputs = vec![
(Delimiter::Bang | Delimiter::Semicolon,
// Note that the ';extra' is fine, because the ';' acts the same as
// the end of input.
vec!["token stream;extra", "token stream!", "token stream"]),
(Delimiter::Bang | Delimiter::Semicolon,
vec![";", "!", ""]),
];
for equivalent in inputs {
for (j, x) in equivalent.1.iter().enumerate() {
for y in equivalent.1[j + 1..].iter() {
let mut ix = ParserInput::new(x);
let mut ix = Parser::new(&mut ix);
let mut iy = ParserInput::new(y);
let mut iy = Parser::new(&mut iy);
let _ = ix.parse_until_before::<_, _, ()>(equivalent.0, |ix| {
iy.parse_until_before::<_, _, ()>(equivalent.0, |iy| {
loop {
let ox = ix.next();
let oy = iy.next();
assert_eq!(ox, oy);
if let Err(_) = ox {
break
}
}
Ok(())
})
});
}
}
}
}
#[test]
fn parser_maintains_current_line() {
let mut input = ParserInput::new("ident ident;\nident ident ident;\nident");
let mut parser = Parser::new(&mut input);
assert_eq!(parser.current_line(), "ident ident;");
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.next(), Ok(&Token::Semicolon));
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.current_line(), "ident ident ident;");
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.next(), Ok(&Token::Semicolon));
assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
assert_eq!(parser.current_line(), "ident");
}
#[test]
fn parse_entirely_reports_first_error() {
#[derive(PartialEq, Debug)]
enum E { Foo }
let mut input = ParserInput::new("ident");
let mut parser = Parser::new(&mut input);
let result: Result<(), _> = parser.parse_entirely(|_| Err(ParseError::Custom(E::Foo)));
assert_eq!(result, Err(ParseError::Custom(E::Foo)));
}

1221
third_party/rust/cssparser-0.19.0/src/tokenizer.rs vendored Normal file

Diff not shown because of its large size.

197
third_party/rust/cssparser-0.19.0/src/unicode_range.rs vendored Normal file

@@ -0,0 +1,197 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! https://drafts.csswg.org/css-syntax/#urange
use {Parser, ToCss, BasicParseError};
use std::char;
use std::cmp;
use std::fmt;
use tokenizer::Token;
/// One contiguous range of code points.
///
/// Cannot be empty. Can represent a single code point when start == end.
#[derive(PartialEq, Eq, Clone, Hash)]
pub struct UnicodeRange {
/// Inclusive start of the range. In [0, end].
pub start: u32,
/// Inclusive end of the range. In [0, 0x10FFFF].
pub end: u32,
}
impl UnicodeRange {
/// https://drafts.csswg.org/css-syntax/#urange-syntax
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, BasicParseError<'i>> {
// <urange> =
// u '+' <ident-token> '?'* |
// u <dimension-token> '?'* |
// u <number-token> '?'* |
// u <number-token> <dimension-token> |
// u <number-token> <number-token> |
// u '+' '?'+
input.expect_ident_matching("u")?;
let after_u = input.position();
parse_tokens(input)?;
// This deviates from the spec in case there are CSS comments
// between tokens in the middle of one <unicode-range>,
// but oh well…
let concatenated_tokens = input.slice_from(after_u);
let range = match parse_concatenated(concatenated_tokens.as_bytes()) {
Ok(range) => range,
Err(()) => return Err(BasicParseError::UnexpectedToken(Token::Ident(concatenated_tokens.into()))),
};
if range.end > char::MAX as u32 || range.start > range.end {
Err(BasicParseError::UnexpectedToken(Token::Ident(concatenated_tokens.into())))
} else {
Ok(range)
}
}
}
fn parse_tokens<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
match input.next_including_whitespace()?.clone() {
Token::Delim('+') => {
match *input.next_including_whitespace()? {
Token::Ident(_) => {}
Token::Delim('?') => {}
ref t => return Err(BasicParseError::UnexpectedToken(t.clone()))
}
parse_question_marks(input)
}
Token::Dimension { .. } => {
parse_question_marks(input)
}
Token::Number { .. } => {
let after_number = input.state();
match input.next_including_whitespace() {
Ok(&Token::Delim('?')) => parse_question_marks(input),
Ok(&Token::Dimension { .. }) => {}
Ok(&Token::Number { .. }) => {}
_ => input.reset(&after_number)
}
}
t => return Err(BasicParseError::UnexpectedToken(t))
}
Ok(())
}
/// Consume as many '?' as possible
fn parse_question_marks(input: &mut Parser) {
loop {
let start = input.state();
match input.next_including_whitespace() {
Ok(&Token::Delim('?')) => {}
_ => {
input.reset(&start);
return
}
}
}
}
fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> {
let mut text = match text.split_first() {
Some((&b'+', text)) => text,
_ => return Err(())
};
let (first_hex_value, hex_digit_count) = consume_hex(&mut text);
let question_marks = consume_question_marks(&mut text);
let consumed = hex_digit_count + question_marks;
if consumed == 0 || consumed > 6 {
return Err(())
}
if question_marks > 0 {
if text.is_empty() {
return Ok(UnicodeRange {
start: first_hex_value << (question_marks * 4),
end: ((first_hex_value + 1) << (question_marks * 4)) - 1,
})
}
} else if text.is_empty() {
return Ok(UnicodeRange {
start: first_hex_value,
end: first_hex_value,
})
} else {
if let Some((&b'-', mut text)) = text.split_first() {
let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
return Ok(UnicodeRange {
start: first_hex_value,
end: second_hex_value,
})
}
}
}
Err(())
}
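// Worked example (illustration): for the text "+1??" we get
// `first_hex_value` = 0x1, `hex_digit_count` = 1 and `question_marks` = 2,
// so `start` = 0x1 << 8 = 0x100 and `end` = ((0x1 + 1) << 8) - 1 = 0x1FF;
// that is, `U+1??` covers U+0100..U+01FF inclusive.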
fn consume_hex(text: &mut &[u8]) -> (u32, usize) {
let mut value = 0;
let mut digits = 0;
while let Some((&byte, rest)) = text.split_first() {
if let Some(digit_value) = (byte as char).to_digit(16) {
value = value * 0x10 + digit_value;
digits += 1;
*text = rest
} else {
break
}
}
(value, digits)
}
fn consume_question_marks(text: &mut &[u8]) -> usize {
let mut question_marks = 0;
while let Some((&b'?', rest)) = text.split_first() {
question_marks += 1;
*text = rest
}
question_marks
}
impl fmt::Debug for UnicodeRange {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
self.to_css(formatter)
}
}
impl ToCss for UnicodeRange {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str("U+")?;
// How many bits are 0 at the end of start and also 1 at the end of end.
let bits = cmp::min(self.start.trailing_zeros(), (!self.end).trailing_zeros());
let question_marks = bits / 4;
// How many lower bits can be represented as question marks
let bits = question_marks * 4;
let truncated_start = self.start >> bits;
let truncated_end = self.end >> bits;
if truncated_start == truncated_end {
// Bits not covered by question marks are the same in start and end,
// we can use the question mark syntax.
if truncated_start != 0 {
write!(dest, "{:X}", truncated_start)?;
}
for _ in 0..question_marks {
dest.write_str("?")?;
}
} else {
write!(dest, "{:X}", self.start)?;
if self.end != self.start {
write!(dest, "-{:X}", self.end)?;
}
}
Ok(())
}
}
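// Worked example (illustration): for start = 0x100 and end = 0x1FF,
// `start.trailing_zeros()` is 8 and `(!end).trailing_zeros()` is 9, so
// `question_marks` = min(8, 9) / 4 = 2; both bounds truncate to 0x1 and
// the range serializes back as "U+1??".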


@@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"cd0faaf645b871741c6270f7ebff68b0aff08be5ed387728fa7d90e0d8403420","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"b847b80097015cb7d0f4be67c0d8b2f6b82006be865917ff14a96b484760d460","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"a3994f121fbff3dd9cf5b72e2f31d34fa14a26e451278faeff423697943fe5ed","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"e5cbc7df1f7d2e57b909ab9ebe5916096eb7f01a67a32a3155f92193d1c73fab","src/rules_and_declarations.rs":"f2cde5c4518a2349d24049f6195e31783a8af2815d84394d21f90c763fc257a1","src/serializer.rs":"c872921703dc029155a8019b51df0d23066b072c7e1f553422e448e66218fbdc","src/size_of_tests.rs":"544193a839daf4f9eb615a3657e0b95ee35c482e8de717f4899ad323b121240e","src/tests.rs":"0d07575505e3d125932ce4ff79f7864fd2ef7c81714e71c6f30a46c55adbc6dd","src/tokenizer.rs":"3855802ca8a2236c463c76208a115ddb8dbf2087de2f7a711ef2aef81d83c508","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"f3a5464ebae36626f28254b60d1abbba951417383192bcea65578b40fbec1a47"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"48227e0e682bcba828f2672fe2b99a03f92021d3455a975d88817c43ca213460","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"b847b80097015cb7d0f4be67c0d8b2f6b82006be865917ff14a96b484760d460","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"a3994f121fbff3dd9cf5b72e2f31d34fa14a26e451278faeff423697943fe5ed","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"7a35d02bf0158a4725527e32295ce734d317e226569234d346952e27048f6ee2","src/rules_and_declarations.rs":"f2cde5c4518a2349d24049f6195e31783a8af2815d84394d21f90c763fc257a1","src/serializer.rs":"c872921703dc029155a8019b51df0d23066b072c7e1f553422e448e66218fbdc","src/size_of_tests.rs":"a28664d44797519119d659eaf7e84e1789ef97e9e2c2d36630eb9f226c0cc0a6","src/tests.rs":"c8ddb31c9d796c57c053364280caf43d6deb94d7c39c1edf81b88f029abfb347","src/tokenizer.rs":"914326e19bf2d97122b41e9930e16a70825aa5c4096bdb39c66d18388f4cdbda","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"1ce5f581b2fea3c097574b99ba16ca79607c4fc0d5e685b74b4ea05f2b18fe6c"}

20
third_party/rust/cssparser/Cargo.toml vendored

@@ -12,7 +12,7 @@
[package]
name = "cssparser"
version = "0.19.0"
version = "0.19.1"
authors = ["Simon Sapin <simon.sapin@exyr.org>"]
build = "build.rs"
exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"]
@@ -26,29 +26,29 @@ repository = "https://github.com/servo/rust-cssparser"
version = ">= 0.3, < 0.5"
optional = true
[dependencies.procedural-masquerade]
version = "0.1"
[dependencies.cssparser-macros]
version = "0.3"
[dependencies.phf]
version = "0.7"
[dependencies.serde]
version = "1.0"
optional = true
[dependencies.procedural-masquerade]
version = "0.1"
[dependencies.matches]
version = "0.1"
[dev-dependencies.difference]
version = "1.0"
[dependencies.phf]
version = "0.7"
[dev-dependencies.encoding_rs]
version = "0.5"
[dev-dependencies.rustc-serialize]
version = "0.3"
[dev-dependencies.difference]
version = "1.0"
[build-dependencies.syn]
version = "0.11"
@@ -56,5 +56,5 @@ version = "0.11"
version = "0.3"
[features]
bench = []
dummy_match_byte = []
bench = []

9
third_party/rust/cssparser/src/parser.rs vendored

@@ -268,6 +268,15 @@ impl<'i: 't, 't> Parser<'i, 't> {
self.input.tokenizer.current_source_location()
}
/// The source map URL, if known.
///
/// The source map URL is extracted from a specially formatted
/// comment. The last such comment is used, so this value may
/// change as parsing proceeds.
pub fn current_source_map_url(&self) -> Option<&str> {
self.input.tokenizer.current_source_map_url()
}
/// Return the current internal state of the parser (including position within the input).
///
/// This state can later be restored with the `Parser::reset` method.


@@ -36,8 +36,8 @@ size_of_test!(token, Token, 32);
size_of_test!(std_cow_str, Cow<'static, str>, 32);
size_of_test!(cow_rc_str, CowRcStr, 16);
size_of_test!(tokenizer, ::tokenizer::Tokenizer, 40);
size_of_test!(parser_input, ::parser::ParserInput, 112);
size_of_test!(tokenizer, ::tokenizer::Tokenizer, 56);
size_of_test!(parser_input, ::parser::ParserInput, 128);
size_of_test!(parser, ::parser::Parser, 16);
size_of_test!(source_position, ::SourcePosition, 8);
size_of_test!(parser_state, ::ParserState, 24);

25
third_party/rust/cssparser/src/tests.rs vendored

@@ -979,3 +979,28 @@ fn parse_entirely_reports_first_error() {
let result: Result<(), _> = parser.parse_entirely(|_| Err(ParseError::Custom(E::Foo)));
assert_eq!(result, Err(ParseError::Custom(E::Foo)));
}
#[test]
fn parse_comments() {
let tests = vec![
("/*# sourceMappingURL=here*/", Some("here")),
("/*# sourceMappingURL=here */", Some("here")),
("/*@ sourceMappingURL=here*/", Some("here")),
("/*@ sourceMappingURL=there*/ /*# sourceMappingURL=here*/", Some("here")),
("/*# sourceMappingURL=here there */", Some("here")),
("/*# sourceMappingURL= here */", Some("")),
("/*# sourceMappingURL=*/", Some("")),
("/*# sourceMappingUR=here */", None),
("/*! sourceMappingURL=here */", None),
("/*# sourceMappingURL = here */", None),
("/* # sourceMappingURL=here */", None)
];
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
while let Ok(_) = parser.next_including_whitespace() {
}
assert_eq!(parser.current_source_map_url(), test.1);
}
}

third_party/rust/cssparser/src/tokenizer.rs (vendored)

@@ -209,6 +209,7 @@ pub struct Tokenizer<'a> {
     current_line_number: u32,
     var_functions: SeenStatus,
     viewport_percentages: SeenStatus,
+    source_map_url: Option<&'a str>,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
@@ -234,6 +235,7 @@ impl<'a> Tokenizer<'a> {
             current_line_number: first_line_number,
             var_functions: SeenStatus::DontCare,
             viewport_percentages: SeenStatus::DontCare,
+            source_map_url: None,
         }
     }
 
@@ -300,6 +302,11 @@ impl<'a> Tokenizer<'a> {
         }
     }
 
+    #[inline]
+    pub fn current_source_map_url(&self) -> Option<&'a str> {
+        self.source_map_url
+    }
+
     #[inline]
     pub fn state(&self) -> ParserState {
         ParserState {
@@ -507,7 +514,9 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         }
         b'/' => {
             if tokenizer.starts_with(b"/*") {
-                Comment(consume_comment(tokenizer))
+                let contents = consume_comment(tokenizer);
+                check_for_source_map(tokenizer, contents);
+                Comment(contents)
             } else {
                 tokenizer.advance(1);
                 Delim('/')
@@ -594,6 +603,20 @@ fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool, is_cr: b
 }
 
+// Check for a sourceMappingURL comment and update the tokenizer appropriately.
+fn check_for_source_map<'a>(tokenizer: &mut Tokenizer<'a>, contents: &'a str) {
+    let directive = "# sourceMappingURL=";
+    let directive_old = "@ sourceMappingURL=";
+
+    // If there is a source map directive, extract the URL.
+    if contents.starts_with(directive) || contents.starts_with(directive_old) {
+        let contents = &contents[directive.len()..];
+        tokenizer.source_map_url = contents.split(|c| {
+            c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n'
+        }).next()
+    }
+}
+
 fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
     tokenizer.advance(2); // consume "/*"
     let start_position = tokenizer.position();

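The matching above is deliberately strict: the directive must open the comment, the '=' must follow "sourceMappingURL" with no intervening space, and the URL runs to the first whitespace character. The standalone restatement below is for illustration only (the vendored function writes into the tokenizer rather than returning a value); its assertions are lifted from the parse_comments test:

    fn source_map_url(comment_contents: &str) -> Option<&str> {
        let directive = "# sourceMappingURL=";
        let directive_old = "@ sourceMappingURL=";
        if comment_contents.starts_with(directive) || comment_contents.starts_with(directive_old) {
            // Both markers have the same length, so one offset suffices.
            let rest = &comment_contents[directive.len()..];
            // The URL is everything up to the first whitespace character;
            // a directive with nothing after '=' yields Some("").
            rest.split(|c| c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n')
                .next()
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(source_map_url("# sourceMappingURL=here there "), Some("here"));
        assert_eq!(source_map_url("@ sourceMappingURL=there"), Some("there"));
        assert_eq!(source_map_url("# sourceMappingURL="), Some(""));
        assert_eq!(source_map_url("# sourceMappingURL = here "), None); // space before '='
    }
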
toolkit/library/rust/Cargo.lock (generated)

@@ -230,7 +230,7 @@ dependencies = [
 
 [[package]]
 name = "cssparser"
-version = "0.19.0"
+version = "0.19.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -382,7 +382,7 @@ name = "geckoservo"
 version = "0.0.1"
 dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -830,7 +830,7 @@ name = "selectors"
 version = "0.19.0"
 dependencies = [
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -932,7 +932,7 @@ dependencies = [
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -979,7 +979,7 @@ version = "0.0.1"
 dependencies = [
  "app_units 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
 ]
@@ -1286,7 +1286,7 @@ dependencies = [
 "checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
 "checksum core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f841e9637adec70838c537cae52cb4c751cc6514ad05669b51d107c2021c79"
 "checksum core-text 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16ce16d9ed00181016c11ff48e561314bec92bfbce9fe48f319366618d4e5de6"
-"checksum cssparser 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a5464ebae36626f28254b60d1abbba951417383192bcea65578b40fbec1a47"
+"checksum cssparser 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce5f581b2fea3c097574b99ba16ca79607c4fc0d5e685b74b4ea05f2b18fe6c"
 "checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
 "checksum dwrote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "36e3b27cd0b8a68e00f07e8d8e1e4f4d8a6b8b873290a734f63bd56d792d23e1"
 "checksum either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18785c1ba806c258137c937e44ada9ee7e69a37e3c72077542cd2f069d78562a"