MozReview-Commit-ID: CckZnhJlNTF
This commit is contained in:
Xidorn Quan 2017-10-04 17:13:34 +11:00
Parent 0b55d8077b
Commit 45f3919331
13 changed files with 2064 additions and 39 deletions

2
third_party/rust/rayon/.cargo-checksum.json vendored

File diff suppressed because one or more lines are too long

12
third_party/rust/rayon/Cargo.toml vendored
View File

@@ -12,7 +12,7 @@
[package]
name = "rayon"
version = "0.8.1"
version = "0.8.2"
authors = ["Niko Matsakis <niko@alum.mit.edu>", "Josh Stone <cuviper@gmail.com>"]
description = "Simple work-stealing parallelism for Rust"
documentation = "https://docs.rs/rayon/"
@@ -23,14 +23,14 @@ version = "1.2"
[dev-dependencies.rand]
version = "0.3"
[dev-dependencies.futures]
version = "0.1.7"
[dev-dependencies.compiletest_rs]
version = "0.2.1"
[dev-dependencies.docopt]
version = "0.7"
[dev-dependencies.compiletest_rs]
version = "0.2.1"
[dev-dependencies.rustc-serialize]
version = "0.3"
[dev-dependencies.futures]
version = "0.1.7"

2
third_party/rust/rayon/README.md vendored
View File

@@ -40,7 +40,7 @@ as:
```rust
[dependencies]
rayon = "0.8.1"
rayon = "0.8.2"
```
and then add the following to your `lib.rs`:

11
third_party/rust/rayon/RELEASES.md vendored
View File

@@ -1,3 +1,14 @@
# Release rayon 0.8.2
- `ParallelSliceMut` now has six parallel sorting methods with the same
  variations as the standard library.
  - `par_sort`, `par_sort_by`, and `par_sort_by_key` perform stable sorts in
    parallel, using the default order, a custom comparator, or a key extraction
    function, respectively.
  - `par_sort_unstable`, `par_sort_unstable_by`, and `par_sort_unstable_by_key`
    perform unstable sorts with the same comparison options.
  - Thanks to @stejpang!
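As a quick, hedged illustration of the new API (not part of the upstream release notes; it assumes the slice traits are exported through `rayon::prelude::*`, as in rayon's other examples):

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let mut v = vec![5, 4, 1, 3, 2];

    // Stable sort in the default order.
    v.par_sort();
    assert_eq!(v, [1, 2, 3, 4, 5]);

    // Stable sort with a custom comparator (descending here).
    v.par_sort_by(|a, b| b.cmp(a));
    assert_eq!(v, [5, 4, 3, 2, 1]);

    // Unstable sort by a key extraction function.
    v.par_sort_unstable_by_key(|x| *x);
    assert_eq!(v, [1, 2, 3, 4, 5]);
}
```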
# Release rayon 0.8.1 / rayon-core 1.2.0
- The following core APIs are being stabilized:

754
third_party/rust/rayon/src/slice/mergesort.rs vendored Normal file
View File

@@ -0,0 +1,754 @@
//! Parallel merge sort.
//!
//! This implementation is copied verbatim from `std::slice::sort` and then parallelized.
//! The only difference from the original is that the sequential `mergesort` returns
//! `MergesortResult` and leaves descending arrays intact.
use iter::*;
use rayon_core;
use slice::ParallelSliceMut;
use std::mem::size_of;
use std::mem;
use std::ptr;
use std::slice;
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
*ptr = ptr.offset(1);
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
*ptr = ptr.offset(-1);
*ptr
}
/// When dropped, copies from `src` into `dest` a sequence of length `len`.
struct CopyOnDrop<T> {
src: *mut T,
dest: *mut T,
len: usize,
}
impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, self.len);
}
}
}
/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
fn insert_head<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
if v.len() >= 2 && is_less(&v[1], &v[0]) {
unsafe {
// There are three ways to implement insertion here:
//
// 1. Swap adjacent elements until the first one gets to its final destination.
// However, this way we copy data around more than is necessary. If elements are big
// structures (costly to copy), this method will be slow.
//
// 2. Iterate until the right place for the first element is found. Then shift the
// elements succeeding it to make room for it and finally place it into the
// remaining hole. This is a good method.
//
// 3. Copy the first element into a temporary variable. Iterate until the right place
// for it is found. As we go along, copy every traversed element into the slot
// preceding it. Finally, copy data from the temporary variable into the remaining
// hole. This method is very good. Benchmarks demonstrated slightly better
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
let mut tmp = NoDrop { value: Some(ptr::read(&v[0])) };
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` in the end.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole {
src: tmp.value.as_mut().unwrap(),
dest: &mut v[1],
};
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
if !is_less(&v[i], tmp.value.as_ref().unwrap()) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
hole.dest = &mut v[i];
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
// Holds a value, but never drops it.
struct NoDrop<T> {
value: Option<T>,
}
impl<T> Drop for NoDrop<T> {
fn drop(&mut self) {
mem::forget(self.value.take());
}
}
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *mut T,
dest: *mut T,
}
impl<T> Drop for InsertionHole<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
let v = v.as_mut_ptr();
let v_mid = v.offset(mid as isize);
let v_end = v.offset(len as isize);
// The merge process first copies the shorter run into `buf`. Then it traces the newly copied
// run and the longer run forwards (or backwards), comparing their next unconsumed elements and
// copying the lesser (or greater) one into `v`.
//
// As soon as the shorter run is fully consumed, the process is done. If the longer run gets
// consumed first, then we must copy whatever is left of the shorter run into the remaining
// hole in `v`.
//
// Intermediate state of the process is always tracked by `hole`, which serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` if the longer run gets consumed first.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and fill the
// hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
// object it initially held exactly once.
let mut hole;
if mid <= len - mid {
// The left run is shorter.
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole {
start: buf,
end: buf.offset(mid as isize),
dest: v,
};
// Initially, these pointers point to the beginnings of their arrays.
let left = &mut hole.start;
let mut right = v_mid;
let out = &mut hole.dest;
while *left < hole.end && right < v_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
let to_copy = if is_less(&*right, &**left) {
get_and_increment(&mut right)
} else {
get_and_increment(left)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
}
} else {
// The right run is shorter.
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole {
start: buf,
end: buf.offset((len - mid) as isize),
dest: v_mid,
};
// Initially, these pointers point past the ends of their arrays.
let left = &mut hole.dest;
let right = &mut hole.end;
let mut out = v_end;
while v < *left && buf < *right {
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
};
ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
}
}
// Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
// it will now be copied into the hole in `v`.
// When dropped, copies the range `start..end` into `dest..`.
struct MergeHole<T> {
start: *mut T,
end: *mut T,
dest: *mut T,
}
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
// `T` is not a zero-sized type, so it's okay to divide by its size.
let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
unsafe {
ptr::copy_nonoverlapping(self.start, self.dest, len);
}
}
}
}
/// The result of merge sort.
#[must_use]
#[derive(Clone, Copy, PartialEq, Eq)]
enum MergesortResult {
/// The slice has already been sorted.
NonDescending,
/// The slice has been descending and therefore it was left intact.
Descending,
/// The slice was sorted.
Sorted,
}
/// A sorted run that starts at index `start` and is of length `len`.
#[derive(Clone, Copy)]
struct Run {
start: usize,
len: usize,
}
/// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
/// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
/// algorithm should continue building a new run instead, `None` is returned.
///
/// TimSort is infamous for its buggy implementations, as described here:
/// http://envisage-project.eu/timsort-specification-and-verification/
///
/// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
/// Enforcing them on just top three is not sufficient to ensure that the invariants will still
/// hold for *all* runs in the stack.
///
/// This function correctly checks invariants for the top four runs. Additionally, if the top
/// run starts at index 0, it will always demand a merge operation until the stack is fully
/// collapsed, in order to complete the sort.
#[inline]
fn collapse(runs: &[Run]) -> Option<usize> {
let n = runs.len();
if n >= 2 && (runs[n - 1].start == 0 ||
runs[n - 2].len <= runs[n - 1].len ||
(n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
(n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
{
if n >= 3 && runs[n - 3].len < runs[n - 1].len {
Some(n - 3)
} else {
Some(n - 2)
}
} else {
None
}
}
/// Sorts a slice using merge sort, unless it is already in descending order.
///
/// This function doesn't modify the slice if it is already non-descending or descending.
/// Otherwise, it sorts the slice into non-descending order.
///
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is `O(n log n)` worst-case.
///
/// # Safety
///
/// The argument `buf` is used as a temporary buffer and must be at least as long as `v`.
unsafe fn mergesort<T, F>(v: &mut [T], buf: *mut T, is_less: &F) -> MergesortResult
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Very short runs are extended using insertion sort to span at least this many elements.
const MIN_RUN: usize = 10;
let len = v.len();
// In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
// strange decision, but consider the fact that merges more often go in the opposite direction
// (forwards). According to benchmarks, merging forwards is slightly faster than merging
// backwards. To conclude, identifying runs by traversing backwards improves performance.
let mut runs = vec![];
let mut end = len;
while end > 0 {
// Find the next natural run, and reverse it if it's strictly descending.
let mut start = end - 1;
if start > 0 {
start -= 1;
if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
start -= 1;
}
// If this descending run covers the whole slice, return immediately.
if start == 0 && end == len {
return MergesortResult::Descending;
} else {
v[start..end].reverse();
}
} else {
while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
start -= 1;
}
// If this non-descending run covers the whole slice, return immediately.
if end - start == len {
return MergesortResult::NonDescending;
}
}
}
// Insert some more elements into the run if it's too short. Insertion sort is faster than
// merge sort on short sequences, so this significantly improves performance.
while start > 0 && end - start < MIN_RUN {
start -= 1;
insert_head(&mut v[start..end], &is_less);
}
// Push this run onto the stack.
runs.push(Run {
start: start,
len: end - start,
});
end = start;
// Merge some pairs of adjacent runs to satisfy the invariants.
while let Some(r) = collapse(&runs) {
let left = runs[r + 1];
let right = runs[r];
merge(&mut v[left.start..right.start + right.len], left.len, buf, &is_less);
runs[r] = Run {
start: left.start,
len: left.len + right.len,
};
runs.remove(r + 1);
}
}
// Finally, exactly one run must remain in the stack.
debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
// The original order of the slice was neither non-descending nor descending.
MergesortResult::Sorted
}
////////////////////////////////////////////////////////////////////////////
// Everything above this line is copied from `std::slice::sort` (with very minor tweaks).
// Everything below this line is parallelization.
////////////////////////////////////////////////////////////////////////////
/// Splits two sorted slices so that they can be merged in parallel.
///
/// Returns two indices `(a, b)` so that slices `left[..a]` and `right[..b]` come before
/// `left[a..]` and `right[b..]`.
fn split_for_merge<T, F>(left: &[T], right: &[T], is_less: &F) -> (usize, usize)
where
F: Fn(&T, &T) -> bool,
{
let left_len = left.len();
let right_len = right.len();
if left_len >= right_len {
let left_mid = left_len / 2;
// Find the first element in `right` that is greater than or equal to `left[left_mid]`.
let mut a = 0;
let mut b = right_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[m], &left[left_mid]) {
a = m + 1;
} else {
b = m;
}
}
(left_mid, a)
} else {
let right_mid = right_len / 2;
// Find the first element in `left` that is greater than `right[right_mid]`.
let mut a = 0;
let mut b = left_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[right_mid], &left[m]) {
b = m;
} else {
a = m + 1;
}
}
(a, right_mid)
}
}
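A small worked example of this contract (illustration only; `split_for_merge` is private to this module, so the indices it would return for these inputs are written out by hand):

```rust
fn main() {
    let left = [1, 3, 5, 7];
    let right = [2, 4, 6];
    // `left` is the longer slice, so the algorithm takes left_mid = 2 and
    // searches `right` for the first element >= left[2] = 5, which yields
    // (a, b) = (2, 2).
    let (a, b) = (2, 2);
    // Everything in left[..a] and right[..b] comes before everything in
    // left[a..] and right[b..], so the two halves can be merged in parallel.
    assert!(left[..a].iter().chain(&right[..b])
        .all(|&x| left[a..].iter().chain(&right[b..]).all(|&y| x <= y)));
}
```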
/// Merges slices `left` and `right` in parallel and stores the result into `dest`.
///
/// # Safety
///
/// The `dest` pointer must have enough space to store the result.
///
/// Even if `is_less` panics at any point during the merge process, this function will fully copy
/// all elements from `left` and `right` into `dest` (not necessarily in sorted order).
unsafe fn par_merge<T, F>(left: &mut [T], right: &mut [T], dest: *mut T, is_less: &F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices whose lengths sum up to this value are merged sequentially. This number is slightly
// larger than `CHUNK_LENGTH`, and the reason is that merging is faster than merge sorting, so
// merging needs a bit coarser granularity in order to hide the overhead of Rayon's task
// scheduling.
const MAX_SEQUENTIAL: usize = 5000;
let left_len = left.len();
let right_len = right.len();
// Intermediate state of the merge process, which serves two purposes:
// 1. Protects integrity of `dest` from panics in `is_less`.
// 2. Copies the remaining elements as soon as one of the two sides is exhausted.
//
// Panic safety:
//
// If `is_less` panics at any point during the merge process, `s` will get dropped and copy the
// remaining parts of `left` and `right` into `dest`.
let mut s = State {
left_start: left.as_mut_ptr(),
left_end: left.as_mut_ptr().offset(left_len as isize),
right_start: right.as_mut_ptr(),
right_end: right.as_mut_ptr().offset(right_len as isize),
dest: dest,
};
if left_len == 0 || right_len == 0 || left_len + right_len < MAX_SEQUENTIAL {
while s.left_start < s.left_end && s.right_start < s.right_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
let to_copy = if is_less(&*s.right_start, &*s.left_start) {
get_and_increment(&mut s.right_start)
} else {
get_and_increment(&mut s.left_start)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(&mut s.dest), 1);
}
} else {
// Function `split_for_merge` might panic. If that happens, `s` will get destructed and copy
// the whole `left` and `right` into `dest`.
let (left_mid, right_mid) = split_for_merge(left, right, is_less);
let (left_l, left_r) = left.split_at_mut(left_mid);
let (right_l, right_r) = right.split_at_mut(right_mid);
// Prevent the destructor of `s` from running. Rayon will ensure that both calls to
// `par_merge` happen. If one of the two calls panics, they will ensure that elements still
// get copied into `dest_left` and `dest_right`.
mem::forget(s);
// Convert the pointers to `usize` because `*mut T` is not `Send`.
let dest_l = dest as usize;
let dest_r = dest.offset((left_l.len() + right_l.len()) as isize) as usize;
rayon_core::join(
|| par_merge(left_l, right_l, dest_l as *mut T, is_less),
|| par_merge(left_r, right_r, dest_r as *mut T, is_less),
);
}
// Finally, `s` gets dropped if we used sequential merge, thus copying the remaining elements
// all at once.
// When dropped, copies arrays `left_start..left_end` and `right_start..right_end` into `dest`,
// in that order.
struct State<T> {
left_start: *mut T,
left_end: *mut T,
right_start: *mut T,
right_end: *mut T,
dest: *mut T,
}
impl<T> Drop for State<T> {
fn drop(&mut self) {
let size = mem::size_of::<T>();
let left_len = (self.left_end as usize - self.left_start as usize) / size;
let right_len = (self.right_end as usize - self.right_start as usize) / size;
// Copy array `left`, followed by `right`.
unsafe {
ptr::copy_nonoverlapping(self.left_start, self.dest, left_len);
self.dest = self.dest.offset(left_len as isize);
ptr::copy_nonoverlapping(self.right_start, self.dest, right_len);
}
}
}
}
/// Recursively merges pre-sorted chunks inside `v`.
///
/// Chunks of `v` are stored in `chunks` as intervals (inclusive left and exclusive right bound).
/// Argument `buf` is an auxiliary buffer that will be used during the procedure.
/// If `into_buf` is true, the result will be stored into `buf`, otherwise it will be in `v`.
///
/// # Safety
///
/// The number of chunks must be positive and they must be adjacent: the right bound of each chunk
/// must equal the left bound of the following chunk.
///
/// The buffer must be at least as long as `v`.
unsafe fn recurse<T, F>(
v: *mut T,
buf: *mut T,
chunks: &[(usize, usize)],
into_buf: bool,
is_less: &F,
)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
let len = chunks.len();
debug_assert!(len > 0);
// Base case of the algorithm.
// If only one chunk is remaining, there's no more work to split and merge.
if len == 1 {
if into_buf {
// Copy the chunk from `v` into `buf`.
let (start, end) = chunks[0];
let src = v.offset(start as isize);
let dest = buf.offset(start as isize);
ptr::copy_nonoverlapping(src, dest, end - start);
}
return;
}
// Split the chunks into two halves.
let (start, _) = chunks[0];
let (mid, _) = chunks[len / 2];
let (_, end) = chunks[len - 1];
let (left, right) = chunks.split_at(len / 2);
// After recursive calls finish we'll have to merge chunks `(start, mid)` and `(mid, end)` from
// `src` into `dest`. If the current invocation has to store the result into `buf`, we'll
// merge chunks from `v` into `buf`, and vice versa.
//
// Recursive calls flip `into_buf` at each level of recursion. More concretely, `par_merge`
// merges chunks from `buf` into `v` at the first level, from `v` into `buf` at the second
// level etc.
let (src, dest) = if into_buf { (v, buf) } else { (buf, v) };
// Panic safety:
//
// If `is_less` panics at any point during the recursive calls, the destructor of `guard` will
// be executed, thus copying everything from `src` into `dest`. This way we ensure that all
// chunks are in fact copied into `dest`, even if the merge process doesn't finish.
let guard = CopyOnDrop {
src: src.offset(start as isize),
dest: dest.offset(start as isize),
len: end - start,
};
// Convert the pointers to `usize` because `*mut T` is not `Send`.
let v = v as usize;
let buf = buf as usize;
rayon_core::join(
|| recurse(v as *mut T, buf as *mut T, left, !into_buf, is_less),
|| recurse(v as *mut T, buf as *mut T, right, !into_buf, is_less),
);
// Everything went all right - recursive calls didn't panic.
// Forget the guard in order to prevent its destructor from running.
mem::forget(guard);
// Merge chunks `(start, mid)` and `(mid, end)` from `src` into `dest`.
let src_left = slice::from_raw_parts_mut(src.offset(start as isize), mid - start);
let src_right = slice::from_raw_parts_mut(src.offset(mid as isize), end - mid);
par_merge(src_left, src_right, dest.offset(start as isize), is_less);
}
/// Sorts `v` using merge sort in parallel.
///
/// The algorithm is stable, allocates memory, and `O(n log n)` worst-case.
/// The allocated temporary buffer is of the same length as `v`.
pub fn par_mergesort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices of up to this length get sorted using insertion sort in order to avoid the cost of
// buffer allocation.
const MAX_INSERTION: usize = 20;
// The length of initial chunks. This number is as small as possible while still keeping the
// overhead of Rayon's task scheduling negligible.
const CHUNK_LENGTH: usize = 2000;
// Sorting has no meaningful behavior on zero-sized types.
if size_of::<T>() == 0 {
return;
}
let len = v.len();
// Short slices get sorted in-place via insertion sort to avoid allocations.
if len <= MAX_INSERTION {
if len >= 2 {
for i in (0..len - 1).rev() {
insert_head(&mut v[i..], &is_less);
}
}
return;
}
// Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
// shallow copies of the contents of `v` without risking the dtors running on copies if
// `is_less` panics.
let mut buf = Vec::<T>::with_capacity(len);
let buf = buf.as_mut_ptr();
// If the slice is not longer than one chunk would be, do sequential merge sort and return.
if len <= CHUNK_LENGTH {
let res = unsafe { mergesort(v, buf, &is_less) };
if res == MergesortResult::Descending {
v.reverse();
}
return;
}
// Split the slice into chunks and merge sort them in parallel.
// However, descending chunks will not be sorted - they will be simply left intact.
let mut iter = {
// Convert the pointer to `usize` because `*mut T` is not `Send`.
let buf = buf as usize;
v.par_chunks_mut(CHUNK_LENGTH)
.with_max_len(1)
.enumerate()
.map(|(i, chunk)| {
let l = CHUNK_LENGTH * i;
let r = l + chunk.len();
unsafe {
let buf = (buf as *mut T).offset(l as isize);
(l, r, mergesort(chunk, buf, &is_less))
}
})
.collect::<Vec<_>>()
.into_iter()
.peekable()
};
// Now attempt to concatenate adjacent chunks that were left intact.
let mut chunks = Vec::with_capacity(iter.len());
while let Some((a, mut b, res)) = iter.next() {
// If this chunk was not modified by the sort procedure...
if res != MergesortResult::Sorted {
while let Some(&(x, y, r)) = iter.peek() {
// If the following chunk is of the same type and can be concatenated...
if r == res && (r == MergesortResult::Descending) == is_less(&v[x], &v[x - 1]) {
// Concatenate them.
b = y;
iter.next();
} else {
break;
}
}
}
// Descending chunks must be reversed.
if res == MergesortResult::Descending {
v[a..b].reverse();
}
chunks.push((a, b));
}
// All chunks are properly sorted.
// Now we just have to merge them together.
unsafe {
recurse(v.as_mut_ptr(), buf as *mut T, &chunks, false, &is_less);
}
}
#[cfg(test)]
mod tests {
use rand::{thread_rng, Rng};
use super::split_for_merge;
#[test]
fn test_split_for_merge() {
fn check(left: &[u32], right: &[u32]) {
let (l, r) = split_for_merge(left, right, &|&a, &b| a < b);
assert!(left[..l].iter().all(|&x| right[r..].iter().all(|&y| x <= y)));
assert!(right[..r].iter().all(|&x| left[l..].iter().all(|&y| x < y)));
}
check(&[1, 2, 2, 2, 2, 3], &[1, 2, 2, 2, 2, 3]);
check(&[1, 2, 2, 2, 2, 3], &[]);
check(&[], &[1, 2, 2, 2, 2, 3]);
for _ in 0..100 {
let mut rng = thread_rng();
let limit = rng.gen::<u32>() % 20 + 1;
let left_len = rng.gen::<usize>() % 20;
let right_len = rng.gen::<usize>() % 20;
let mut left = rng.gen_iter::<u32>()
.map(|x| x % limit)
.take(left_len)
.collect::<Vec<_>>();
let mut right = rng.gen_iter::<u32>()
.map(|x| x % limit)
.take(right_len)
.collect::<Vec<_>>();
left.sort();
right.sort();
check(&left, &right);
}
}
}

View File

@@ -2,10 +2,18 @@
//! (`[T]`). You will rarely need to interact with it directly unless
//! you have need to name one of those types.
mod mergesort;
mod quicksort;
mod test;
use iter::*;
use iter::internal::*;
use self::mergesort::par_mergesort;
use self::quicksort::par_quicksort;
use split_producer::*;
use std::cmp;
use std::cmp::Ordering;
/// Parallel extensions for slices.
pub trait ParallelSlice<T: Sync> {
@@ -76,6 +84,178 @@ pub trait ParallelSliceMut<T: Send> {
slice: self.as_parallel_slice_mut(),
}
}
/// Sorts the slice in parallel.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`par_sort_unstable`](#method.par_sort_unstable).
///
/// # Current implementation
///
/// The current algorithm is an adaptive merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage the same size as `self`, but for very short slices a
/// non-allocating insertion sort is used instead.
///
/// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
/// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
/// or descending runs are concatenated. Finally, the remaining chunks are merged together using
/// parallel subdivision of chunks and parallel merge operation.
fn par_sort(&mut self)
where
T: Ord,
{
par_mergesort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
}
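A minimal call site for `par_sort`, as a sketch (it assumes the trait is brought into scope via `rayon::prelude::*`):

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let mut v = vec![-5i32, 4, 1, -3, 2];
    v.par_sort();
    assert_eq!(v, [-5, -3, 1, 2, 4]);
}
```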
/// Sorts the slice in parallel with a comparator function.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
///
/// # Current implementation
///
/// The current algorithm is an adaptive merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage the same size as `self`, but for very short slices a
/// non-allocating insertion sort is used instead.
///
/// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
/// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
/// or descending runs are concatenated. Finally, the remaining chunks are merged together using
/// parallel subdivision of chunks and parallel merge operation.
fn par_sort_by<F>(&mut self, compare: F)
where
F: Fn(&T, &T) -> Ordering + Sync,
{
par_mergesort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
}
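A comparator sketch under the same `rayon::prelude::*` assumption; the closure only has to return an `Ordering`:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let mut v = vec![5, 4, 1, 3, 2];
    // Reverse the natural order by swapping the arguments of `cmp`.
    v.par_sort_by(|a, b| b.cmp(a));
    assert_eq!(v, [5, 4, 3, 2, 1]);
}
```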
/// Sorts the slice in parallel with a key extraction function.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
///
/// # Current implementation
///
/// The current algorithm is an adaptive merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage the same size as `self`, but for very short slices a
/// non-allocating insertion sort is used instead.
///
/// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
/// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
/// or descending runs are concatenated. Finally, the remaining chunks are merged together using
/// parallel subdivision of chunks and parallel merge operation.
fn par_sort_by_key<B, F>(&mut self, f: F)
where
B: Ord,
F: Fn(&T) -> B + Sync,
{
par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
}
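A key-extraction sketch (same prelude assumption); sorting by absolute value keeps elements with equal keys in their original relative order because the sort is stable:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let mut v = vec![-5i32, 4, 1, -3, 2];
    v.par_sort_by_key(|k| k.abs());
    assert_eq!(v, [1, 2, -3, 4, -5]);
}
```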
/// Sorts the slice in parallel, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(n log n)` worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
/// which is a quicksort variant designed to be very fast on certain kinds of patterns,
/// sometimes achieving linear time. It is randomized but deterministic, and falls back to
/// heapsort on degenerate inputs.
///
/// It is generally faster than stable sorting, except in a few special cases, e.g. when the
/// slice consists of several concatenated sorted sequences.
///
/// All quicksorts work in two stages: partitioning into two halves followed by recursive
/// calls. The partitioning phase is sequential, but the two recursive calls are performed in
/// parallel.
///
/// [pdqsort]: https://github.com/orlp/pdqsort
fn par_sort_unstable(&mut self)
where
T: Ord,
{
par_quicksort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
}
/// Sorts the slice in parallel with a comparator function, but may not preserve the order of
/// equal elements.
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(n log n)` worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
/// which is a quicksort variant designed to be very fast on certain kinds of patterns,
/// sometimes achieving linear time. It is randomized but deterministic, and falls back to
/// heapsort on degenerate inputs.
///
/// It is generally faster than stable sorting, except in a few special cases, e.g. when the
/// slice consists of several concatenated sorted sequences.
///
/// All quicksorts work in two stages: partitioning into two halves followed by recursive
/// calls. The partitioning phase is sequential, but the two recursive calls are performed in
/// parallel.
///
/// [pdqsort]: https://github.com/orlp/pdqsort
fn par_sort_unstable_by<F>(&mut self, compare: F)
where
F: Fn(&T, &T) -> Ordering + Sync,
{
par_quicksort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice in parallel with a key extraction function, but may not preserve the order
/// of equal elements.
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(n log n)` worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
/// which is a quicksort variant designed to be very fast on certain kinds of patterns,
/// sometimes achieving linear time. It is randomized but deterministic, and falls back to
/// heapsort on degenerate inputs.
///
/// It is generally faster than stable sorting, except in a few special cases, e.g. when the
/// slice consists of several concatenated sorted sequences.
///
/// All quicksorts work in two stages: partitioning into two halves followed by recursive
/// calls. The partitioning phase is sequential, but the two recursive calls are performed in
/// parallel.
///
/// [pdqsort]: https://github.com/orlp/pdqsort
fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
where
B: Ord,
F: Fn(&T) -> B + Sync,
{
par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
}
}
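The unstable variants follow the same calling pattern; a combined sketch (prelude assumption as above):

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let mut v = vec![-5i32, 4, 1, -3, 2];

    v.par_sort_unstable();
    assert_eq!(v, [-5, -3, 1, 2, 4]);

    v.par_sort_unstable_by(|a, b| b.cmp(a));
    assert_eq!(v, [4, 2, 1, -3, -5]);

    v.par_sort_unstable_by_key(|k| k.abs());
    assert_eq!(v, [1, 2, -3, 4, -5]);
}
```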
impl<T: Send> ParallelSliceMut<T> for [T] {

788
third_party/rust/rayon/src/slice/quicksort.rs vendored Normal file
View File

@@ -0,0 +1,788 @@
//! Parallel quicksort.
//!
//! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized.
//! The only difference from the original is that calls to `recurse` are executed in parallel using
//! `rayon_core::join`.
use rayon_core;
use std::cmp;
use std::mem;
use std::ptr;
/// When dropped, takes the value out of `Option` and writes it into `dest`.
///
/// This allows us to safely read the pivot into a stack-allocated variable for efficiency, and
/// write it back into the slice after partitioning. This way we ensure that the write happens
/// even if `is_less` panics in the meantime.
struct WriteOnDrop<T> {
value: Option<T>,
dest: *mut T,
}
impl<T> Drop for WriteOnDrop<T> {
fn drop(&mut self) {
unsafe {
ptr::write(self.dest, self.value.take().unwrap());
}
}
}
/// Holds a value, but never drops it.
struct NoDrop<T> {
value: Option<T>,
}
impl<T> Drop for NoDrop<T> {
fn drop(&mut self) {
mem::forget(self.value.take());
}
}
/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
src: *mut T,
dest: *mut T,
}
impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
/// Shifts the first element to the right until it encounters a greater or equal element.
fn shift_head<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
unsafe {
// If the first two elements are out-of-order...
if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
// Read the first element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(0))) };
let mut hole = CopyOnDrop {
src: tmp.value.as_mut().unwrap(),
dest: v.get_unchecked_mut(1),
};
ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);
for i in 2..len {
if !is_less(v.get_unchecked(i), tmp.value.as_ref().unwrap()) {
break;
}
// Move `i`-th element one place to the left, thus shifting the hole to the right.
ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
hole.dest = v.get_unchecked_mut(i);
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
}
/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
unsafe {
// If the last two elements are out-of-order...
if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
// Read the last element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(len - 1))) };
let mut hole = CopyOnDrop {
src: tmp.value.as_mut().unwrap(),
dest: v.get_unchecked_mut(len - 2),
};
ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);
for i in (0..len - 2).rev() {
if !is_less(&tmp.value.as_ref().unwrap(), v.get_unchecked(i)) {
break;
}
// Move `i`-th element one place to the right, thus shifting the hole to the left.
ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
hole.dest = v.get_unchecked_mut(i);
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
}
/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
where
F: Fn(&T, &T) -> bool,
{
// Maximum number of adjacent out-of-order pairs that will get shifted.
const MAX_STEPS: usize = 5;
// If the slice is shorter than this, don't shift any elements.
const SHORTEST_SHIFTING: usize = 50;
let len = v.len();
let mut i = 1;
for _ in 0..MAX_STEPS {
unsafe {
// Find the next pair of adjacent out-of-order elements.
while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
i += 1;
}
}
// Are we done?
if i == len {
return true;
}
// Don't shift elements on short arrays; that has a performance cost.
if len < SHORTEST_SHIFTING {
return false;
}
// Swap the found pair of elements. This puts them in correct order.
v.swap(i - 1, i);
// Shift the smaller element to the left.
shift_tail(&mut v[..i], is_less);
// Shift the greater element to the right.
shift_head(&mut v[i..], is_less);
}
// Didn't manage to sort the slice in the limited number of steps.
false
}
/// Sorts a slice using insertion sort, which is `O(n^2)` worst-case.
fn insertion_sort<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
for i in 1..v.len() {
shift_tail(&mut v[..i + 1], is_less);
}
}
/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
#[cold]
fn heapsort<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
// This binary heap respects the invariant `parent >= child`.
let sift_down = |v: &mut [T], mut node| {
loop {
// Children of `node`:
let left = 2 * node + 1;
let right = 2 * node + 2;
// Choose the greater child.
let greater = if right < v.len() && is_less(&v[left], &v[right]) {
right
} else {
left
};
// Stop if the invariant holds at `node`.
if greater >= v.len() || !is_less(&v[node], &v[greater]) {
break;
}
// Swap `node` with the greater child, move one step down, and continue sifting.
v.swap(node, greater);
node = greater;
}
};
// Build the heap in linear time.
for i in (0..v.len() / 2).rev() {
sift_down(v, i);
}
// Pop maximal elements from the heap.
for i in (1..v.len()).rev() {
v.swap(0, i);
sift_down(&mut v[..i], 0);
}
}
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
// Number of elements in a typical block.
const BLOCK: usize = 128;
// The partitioning algorithm repeats the following steps until completion:
//
// 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
// 2. Trace a block from the right side to identify elements smaller than the pivot.
// 3. Exchange the identified elements between the left and right side.
//
// We keep the following variables for a block of elements:
//
// 1. `block` - Number of elements in the block.
// 2. `start` - Start pointer into the `offsets` array.
// 3. `end` - End pointer into the `offsets` array.
// 4. `offsets` - Indices of out-of-order elements within the block.
// The current block on the left side (from `l` to `l.offset(block_l)`).
let mut l = v.as_mut_ptr();
let mut block_l = BLOCK;
let mut start_l = ptr::null_mut();
let mut end_l = ptr::null_mut();
let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
// The current block on the right side (from `r.offset(-block_r)` to `r`).
let mut r = unsafe { l.offset(v.len() as isize) };
let mut block_r = BLOCK;
let mut start_r = ptr::null_mut();
let mut end_r = ptr::null_mut();
let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
// Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
fn width<T>(l: *mut T, r: *mut T) -> usize {
assert!(mem::size_of::<T>() > 0);
(r as usize - l as usize) / mem::size_of::<T>()
}
loop {
// We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
// some patch-up work in order to partition the remaining elements in between.
let is_done = width(l, r) <= 2 * BLOCK;
if is_done {
// Number of remaining elements (still not compared to the pivot).
let mut rem = width(l, r);
if start_l < end_l || start_r < end_r {
rem -= BLOCK;
}
// Adjust block sizes so that the left and right block don't overlap, but get perfectly
// aligned to cover the whole remaining gap.
if start_l < end_l {
block_r = rem;
} else if start_r < end_r {
block_l = rem;
} else {
block_l = rem / 2;
block_r = rem - block_l;
}
debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
debug_assert!(width(l, r) == block_l + block_r);
}
if start_l == end_l {
// Trace `block_l` elements from the left side.
start_l = offsets_l.as_mut_ptr();
end_l = offsets_l.as_mut_ptr();
let mut elem = l;
for i in 0..block_l {
unsafe {
// Branchless comparison.
*end_l = i as u8;
end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
elem = elem.offset(1);
}
}
}
if start_r == end_r {
// Trace `block_r` elements from the right side.
start_r = offsets_r.as_mut_ptr();
end_r = offsets_r.as_mut_ptr();
let mut elem = r;
for i in 0..block_r {
unsafe {
// Branchless comparison.
elem = elem.offset(-1);
*end_r = i as u8;
end_r = end_r.offset(is_less(&*elem, pivot) as isize);
}
}
}
// Number of out-of-order elements to swap between the left and right side.
let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
if count > 0 {
macro_rules! left { () => { l.offset(*start_l as isize) } }
macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) } }
// Instead of swapping one pair at the time, it is more efficient to perform a cyclic
// permutation. This is not strictly equivalent to swapping, but produces a similar
// result using fewer memory operations.
unsafe {
let tmp = ptr::read(left!());
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
start_l = start_l.offset(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
start_r = start_r.offset(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
start_l = start_l.offset(1);
start_r = start_r.offset(1);
}
}
if start_l == end_l {
// All out-of-order elements in the left block were moved. Move to the next block.
l = unsafe { l.offset(block_l as isize) };
}
if start_r == end_r {
// All out-of-order elements in the right block were moved. Move to the previous block.
r = unsafe { r.offset(-(block_r as isize)) };
}
if is_done {
break;
}
}
// All that remains now is at most one block (either the left or the right) with out-of-order
// elements that need to be moved. Such remaining elements can be simply shifted to the end
// within their block.
if start_l < end_l {
// The left block remains.
// Move its remaining out-of-order elements to the far right.
debug_assert_eq!(width(l, r), block_l);
while start_l < end_l {
unsafe {
end_l = end_l.offset(-1);
ptr::swap(l.offset(*end_l as isize), r.offset(-1));
r = r.offset(-1);
}
}
width(v.as_mut_ptr(), r)
} else if start_r < end_r {
// The right block remains.
// Move its remaining out-of-order elements to the far left.
debug_assert_eq!(width(l, r), block_r);
while start_r < end_r {
unsafe {
end_r = end_r.offset(-1);
ptr::swap(l, r.offset(-(*end_r as isize) - 1));
l = l.offset(1);
}
}
width(v.as_mut_ptr(), l)
} else {
// Nothing else to do, we're done.
width(v.as_mut_ptr(), l)
}
}
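The branchless offset collection described at the top of this function can be illustrated with safe, fixed-size data (a sketch only; the real code works on raw pointers and blocks of 128 elements):

```rust
fn main() {
    let block = [3u32, 9, 1, 7, 4, 8, 2, 6];
    let pivot = 5u32;
    let mut offsets = [0u8; 8];
    let mut end = 0usize;
    for (i, &x) in block.iter().enumerate() {
        // The write is unconditional; only the cursor advance depends on the
        // comparison, so the loop body contains no unpredictable branch.
        offsets[end] = i as u8;
        end += (x >= pivot) as usize;
    }
    // Offsets of the elements that belong on the right of the pivot.
    let expected: [u8; 4] = [1, 3, 5, 7];
    assert_eq!(&offsets[..end], &expected[..]);
}
```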
/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
let (mid, was_partitioned) = {
// Place the pivot at the beginning of slice.
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
let write_on_drop = WriteOnDrop {
value: unsafe { Some(ptr::read(pivot)) },
dest: pivot,
};
let pivot = write_on_drop.value.as_ref().unwrap();
// Find the first pair of out-of-order elements.
let mut l = 0;
let mut r = v.len();
unsafe {
// Find the first element greater than or equal to the pivot.
while l < r && is_less(v.get_unchecked(l), pivot) {
l += 1;
}
// Find the last element smaller than the pivot.
while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
r -= 1;
}
}
(
l + partition_in_blocks(&mut v[l..r], pivot, is_less),
l >= r,
)
// `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated
// variable) back into the slice where it originally was. This step is critical in ensuring
// safety!
};
// Place the pivot between the two partitions.
v.swap(0, mid);
(mid, was_partitioned)
}
/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
// Place the pivot at the beginning of slice.
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
let write_on_drop = WriteOnDrop {
value: unsafe { Some(ptr::read(pivot)) },
dest: pivot,
};
let pivot = write_on_drop.value.as_ref().unwrap();
// Now partition the slice.
let mut l = 0;
let mut r = v.len();
loop {
unsafe {
// Find the first element greater than the pivot.
while l < r && !is_less(pivot, v.get_unchecked(l)) {
l += 1;
}
// Find the last element equal to the pivot.
while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
r -= 1;
}
// Are we done?
if l >= r {
break;
}
// Swap the found pair of out-of-order elements.
r -= 1;
ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
l += 1;
}
}
// We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
l + 1
// `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated variable)
// back into the slice where it originally was. This step is critical in ensuring safety!
}
/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
#[cold]
fn break_patterns<T>(v: &mut [T]) {
let len = v.len();
if len >= 8 {
// Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
let mut random = len as u32;
let mut gen_u32 = || {
random ^= random << 13;
random ^= random >> 17;
random ^= random << 5;
random
};
let mut gen_usize = || if mem::size_of::<usize>() <= 4 {
gen_u32() as usize
} else {
(((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
};
// Take random numbers modulo this number.
// The number fits into `usize` because `len` is not greater than `isize::MAX`.
let modulus = len.next_power_of_two();
// Some pivot candidates will be found near this index. Let's randomize them.
let pos = len / 4 * 2;
for i in 0..3 {
// Generate a random number modulo `len`. However, in order to avoid costly operations
// we first take it modulo a power of two, and then decrease by `len` until it fits
// into the range `[0, len - 1]`.
let mut other = gen_usize() & (modulus - 1);
// `other` is guaranteed to be less than `2 * len`.
if other >= len {
other -= len;
}
v.swap(pos - 1 + i, other);
}
}
}
/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
// Minimum length to choose the median-of-medians method.
// Shorter slices use the simple median-of-three method.
const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
// Maximum number of swaps that can be performed in this function.
const MAX_SWAPS: usize = 4 * 3;
let len = v.len();
// Three indices near which we are going to choose a pivot.
let mut a = len / 4 * 1;
let mut b = len / 4 * 2;
let mut c = len / 4 * 3;
// Counts the total number of swaps we are about to perform while sorting indices.
let mut swaps = 0;
if len >= 8 {
// Swaps indices so that `v[a] <= v[b]`.
let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
ptr::swap(a, b);
swaps += 1;
}
};
// Swaps indices so that `v[a] <= v[b] <= v[c]`.
let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
sort2(a, b);
sort2(b, c);
sort2(a, b);
};
if len >= SHORTEST_MEDIAN_OF_MEDIANS {
// Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
let mut sort_adjacent = |a: &mut usize| {
let tmp = *a;
sort3(&mut (tmp - 1), a, &mut (tmp + 1));
};
// Find medians in the neighborhoods of `a`, `b`, and `c`.
sort_adjacent(&mut a);
sort_adjacent(&mut b);
sort_adjacent(&mut c);
}
// Find the median among `a`, `b`, and `c`.
sort3(&mut a, &mut b, &mut c);
}
if swaps < MAX_SWAPS {
(b, swaps == 0)
} else {
// The maximum number of swaps was performed. Chances are the slice is descending or mostly
// descending, so reversing will probably help sort it faster.
v.reverse();
(len - 1 - b, true)
}
}
/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: usize)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices of up to this length get sorted using insertion sort.
const MAX_INSERTION: usize = 20;
// If both partitions are up to this length, we continue sequentially. This number is as small
// as possible while still keeping the overhead of Rayon's task scheduling negligible.
const MAX_SEQUENTIAL: usize = 2000;
// True if the last partitioning was reasonably balanced.
let mut was_balanced = true;
// True if the last partitioning didn't shuffle elements (the slice was already partitioned).
let mut was_partitioned = true;
loop {
let len = v.len();
// Very short slices get sorted using insertion sort.
if len <= MAX_INSERTION {
insertion_sort(v, is_less);
return;
}
// If too many bad pivot choices were made, simply fall back to heapsort in order to
// guarantee `O(n log n)` worst-case.
if limit == 0 {
heapsort(v, is_less);
return;
}
// If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
// some elements around. Hopefully we'll choose a better pivot this time.
if !was_balanced {
break_patterns(v);
limit -= 1;
}
// Choose a pivot and try guessing whether the slice is already sorted.
let (pivot, likely_sorted) = choose_pivot(v, is_less);
// If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
// selection predicts the slice is likely already sorted...
if was_balanced && was_partitioned && likely_sorted {
// Try identifying several out-of-order elements and shifting them to correct
// positions. If the slice ends up being completely sorted, we're done.
if partial_insertion_sort(v, is_less) {
return;
}
}
// If the chosen pivot is equal to the predecessor, then it's the smallest element in the
// slice. Partition the slice into elements equal to and elements greater than the pivot.
// This case is usually hit when the slice contains many duplicate elements.
if let Some(ref p) = pred {
if !is_less(p, &v[pivot]) {
let mid = partition_equal(v, pivot, is_less);
// Continue sorting elements greater than the pivot.
v = &mut {v}[mid..];
continue;
}
}
// Partition the slice.
let (mid, was_p) = partition(v, pivot, is_less);
was_balanced = cmp::min(mid, len - mid) >= len / 8;
was_partitioned = was_p;
// Split the slice into `left`, `pivot`, and `right`.
let (left, right) = {v}.split_at_mut(mid);
let (pivot, right) = right.split_at_mut(1);
let pivot = &mut pivot[0];
if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
// Recurse into the shorter side only in order to minimize the total number of recursive
// calls and consume less stack space. Then just continue with the longer side (this is
// akin to tail recursion).
if left.len() < right.len() {
recurse(left, is_less, pred, limit);
v = right;
pred = Some(pivot);
} else {
recurse(right, is_less, Some(pivot), limit);
v = left;
}
} else {
// Sort the left and right half in parallel.
rayon_core::join(
|| recurse(left, is_less, pred, limit),
|| recurse(right, is_less, Some(pivot), limit),
);
break;
}
}
}
/// Sorts `v` using pattern-defeating quicksort in parallel.
///
/// The algorithm is unstable, in-place, and `O(n log n)` worst-case.
pub fn par_quicksort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Sorting has no meaningful behavior on zero-sized types.
if mem::size_of::<T>() == 0 {
return;
}
// Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
recurse(v, &is_less, None, limit);
}
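A quick numeric check of the `limit` formula above (assuming a 64-bit `usize`):

```rust
fn main() {
    let len: usize = 1000;
    let limit = std::mem::size_of::<usize>() * 8 - len.leading_zeros() as usize;
    // 1000 needs 10 bits, so floor(log2(1000)) + 1 = 10 imbalanced partitions
    // are allowed before the sort falls back to heapsort.
    assert_eq!(limit, 10);
}
```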
#[cfg(test)]
mod tests {
use rand::{thread_rng, Rng};
use super::heapsort;
#[test]
fn test_heapsort() {
let mut rng = thread_rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
for _ in 0..100 {
let v: Vec<_> = rng.gen_iter::<i32>()
.map(|x| x % modulus)
.take(len)
.collect();
// Test heapsort using `<` operator.
let mut tmp = v.clone();
heapsort(&mut tmp, &|a, b| a < b);
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
// Test heapsort using `>` operator.
let mut tmp = v.clone();
heapsort(&mut tmp, &|a, b| a > b);
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v: Vec<_> = (0..100).collect();
heapsort(&mut v, &|_, _| thread_rng().gen());
heapsort(&mut v, &|a, b| a < b);
for i in 0..v.len() {
assert_eq!(v[i], i);
}
}
}

129
third_party/rust/rayon/src/slice/test.rs vendored Normal file
View File

@@ -0,0 +1,129 @@
#![cfg(test)]
use rand::{thread_rng, Rng};
use std::cmp::Ordering::{Equal, Greater, Less};
use std::mem;
use super::ParallelSliceMut;
macro_rules! sort {
($f:ident, $name:ident) => {
#[test]
fn $name() {
let mut rng = thread_rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
for _ in 0..100 {
let v: Vec<_> = rng.gen_iter::<i32>()
.map(|x| x % modulus)
.take(len)
.collect();
// Test sort using `<` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| a.cmp(b));
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
// Test sort using `>` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| b.cmp(a));
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
// Test sort with many duplicates.
for &len in &[1_000, 10_000, 100_000] {
for &modulus in &[5, 10, 100, 10_000] {
let mut v: Vec<_> = rng.gen_iter::<i32>()
.map(|x| x % modulus)
.take(len)
.collect();
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Test sort with many pre-sorted runs.
for &len in &[1_000, 10_000, 100_000] {
for &modulus in &[5, 10, 1000, 50_000] {
let mut v: Vec<_> = rng.gen_iter::<i32>()
.map(|x| x % modulus)
.take(len)
.collect();
v.sort();
v.reverse();
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
v[a..b].reverse();
} else {
v.swap(a, b);
}
}
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v: Vec<_> = (0..100).collect();
v.$f(|_, _| *thread_rng().choose(&[Less, Equal, Greater]).unwrap());
v.$f(|a, b| a.cmp(b));
for i in 0..v.len() {
assert_eq!(v[i], i);
}
// Should not panic.
[0i32; 0].$f(|a, b| a.cmp(b));
[(); 10].$f(|a, b| a.cmp(b));
[(); 100].$f(|a, b| a.cmp(b));
let mut v = [0xDEADBEEFu64];
v.$f(|a, b| a.cmp(b));
assert!(v == [0xDEADBEEF]);
}
}
}
sort!(par_sort_by, test_par_sort);
sort!(par_sort_unstable_by, test_par_sort_unstable);
#[test]
fn test_par_sort_stability() {
for len in (2..25).chain(500..510).chain(50_000..50_010) {
for _ in 0..10 {
let mut counts = [0; 10];
// Create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e. the second elements
// will occur in sorted order.
let mut v: Vec<_> = (0..len)
.map(|_| {
let n = thread_rng().gen::<usize>() % 10;
counts[n] += 1;
(n, counts[n])
})
.collect();
// Only sort on the first element, so an unstable sort
// may mix up the counts.
v.par_sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e. exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}
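The stability property this test asserts can be seen on a small, hand-checkable input; a sketch assuming the same `rayon::prelude` import used elsewhere in this commit:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // Equal keys (first element); the second element records input order.
    let mut v = vec![(1, 'a'), (0, 'b'), (1, 'c'), (0, 'd')];
    // `par_sort_by_key` is stable, so ties keep their original order:
    // 'b' stays before 'd' and 'a' stays before 'c'.
    v.par_sort_by_key(|&(k, _)| k);
    assert_eq!(v, [(0, 'b'), (0, 'd'), (1, 'a'), (1, 'c')]);
}
```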


@ -9,22 +9,22 @@ use rayon::prelude::*;
fn main() {
let v: Vec<_> = (0..100).map(Some).collect();
v.par_iter().chain(&v); //~ ERROR unused result
v.par_iter().cloned(); //~ ERROR unused result
v.par_iter().enumerate(); //~ ERROR unused result
v.par_iter().filter(|_| true); //~ ERROR unused result
v.par_iter().filter_map(|x| *x); //~ ERROR unused result
v.par_iter().flat_map(|x| *x); //~ ERROR unused result
v.par_iter().fold(|| 0, |x, _| x); //~ ERROR unused result
v.par_iter().fold_with(0, |x, _| x); //~ ERROR unused result
v.par_iter().inspect(|_| {}); //~ ERROR unused result
v.par_iter().map(|x| x); //~ ERROR unused result
v.par_iter().map_with(0, |_, x| x); //~ ERROR unused result
v.par_iter().rev(); //~ ERROR unused result
v.par_iter().skip(1); //~ ERROR unused result
v.par_iter().take(1); //~ ERROR unused result
v.par_iter().cloned().while_some(); //~ ERROR unused result
v.par_iter().with_max_len(1); //~ ERROR unused result
v.par_iter().with_min_len(1); //~ ERROR unused result
v.par_iter().zip(&v); //~ ERROR unused result
v.par_iter().chain(&v); //~ ERROR must be used
v.par_iter().cloned(); //~ ERROR must be used
v.par_iter().enumerate(); //~ ERROR must be used
v.par_iter().filter(|_| true); //~ ERROR must be used
v.par_iter().filter_map(|x| *x); //~ ERROR must be used
v.par_iter().flat_map(|x| *x); //~ ERROR must be used
v.par_iter().fold(|| 0, |x, _| x); //~ ERROR must be used
v.par_iter().fold_with(0, |x, _| x); //~ ERROR must be used
v.par_iter().inspect(|_| {}); //~ ERROR must be used
v.par_iter().map(|x| x); //~ ERROR must be used
v.par_iter().map_with(0, |_, x| x); //~ ERROR must be used
v.par_iter().rev(); //~ ERROR must be used
v.par_iter().skip(1); //~ ERROR must be used
v.par_iter().take(1); //~ ERROR must be used
v.par_iter().cloned().while_some(); //~ ERROR must be used
v.par_iter().with_max_len(1); //~ ERROR must be used
v.par_iter().with_min_len(1); //~ ERROR must be used
v.par_iter().zip(&v); //~ ERROR must be used
}
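The change in the expected messages presumably tracks a change in rustc's wording for the `unused_must_use` lint; either way, these adapters are lazy and do nothing until the pipeline is driven by a consumer. A sketch of the warning-free usage (a hypothetical example, not part of the test suite):

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let v: Vec<_> = (0..100).map(Some).collect();
    // Driving the adapter chain with a consumer (`collect`) does real work,
    // so no "must be used" warning is emitted for the intermediate steps.
    let unwrapped: Vec<i32> = v.par_iter().filter_map(|x| *x).collect();
    assert_eq!(unwrapped.len(), 100);
}
```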


@ -3,7 +3,6 @@ extern crate rayon;
use std::rc::Rc;
fn main() {
rayon::join(|| Rc::new(22), || Rc::new(23));
//~^ ERROR E0277
//~| ERROR E0277
rayon::join(|| Rc::new(22), || ()); //~ ERROR E0277
rayon::join(|| (), || Rc::new(23)); //~ ERROR E0277
}
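These compile-fail cases exist because `rayon::join` requires the closures and their return values to be `Send`, and `Rc` is not. A sketch of a compiling alternative using `Arc` (an assumed stand-in, not taken from the test suite):

```rust
extern crate rayon;
use std::sync::Arc;

fn main() {
    // `Arc<i32>` is `Send`, so both return values satisfy `join`'s bounds,
    // unlike the `Rc` values rejected above with E0277.
    let (a, b) = rayon::join(|| Arc::new(22), || Arc::new(23));
    assert_eq!(*a + *b, 45);
}
```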

164
third_party/rust/rayon/tests/run-pass/sort-panic-safe.rs vendored Normal file

@ -0,0 +1,164 @@
#[macro_use]
extern crate lazy_static;
extern crate rayon;
extern crate rand;
use rand::{thread_rng, Rng};
use rayon::*;
use rayon::prelude::*;
use std::cell::Cell;
use std::cmp::{self, Ordering};
use std::panic;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize};
use std::thread;
static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT;
lazy_static! {
static ref DROP_COUNTS: Vec<AtomicUsize> = (0..20_000)
.map(|_| AtomicUsize::new(0))
.collect();
}
#[derive(Clone, Eq)]
struct DropCounter {
x: u32,
id: usize,
version: Cell<usize>,
}
impl PartialEq for DropCounter {
fn eq(&self, other: &Self) -> bool {
self.partial_cmp(other) == Some(Ordering::Equal)
}
}
impl PartialOrd for DropCounter {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.version.set(self.version.get() + 1);
other.version.set(other.version.get() + 1);
VERSIONS.fetch_add(2, Relaxed);
self.x.partial_cmp(&other.x)
}
}
impl Ord for DropCounter {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Drop for DropCounter {
fn drop(&mut self) {
DROP_COUNTS[self.id].fetch_add(1, Relaxed);
VERSIONS.fetch_sub(self.version.get(), Relaxed);
}
}
macro_rules! test {
($input:ident, $func:ident) => {
let len = $input.len();
// Work out the total number of comparisons required to sort
// this array...
let count = AtomicUsize::new(0);
$input.to_owned().$func(|a, b| {
count.fetch_add(1, Relaxed);
a.cmp(b)
});
let mut panic_countdown = count.load(Relaxed);
let step = if len <= 100 {
1
} else {
cmp::max(1, panic_countdown / 10)
};
// ... and then panic after every `step` comparisons.
loop {
// Refresh the counters.
VERSIONS.store(0, Relaxed);
for i in 0..len {
DROP_COUNTS[i].store(0, Relaxed);
}
let v = $input.to_owned();
let _ = thread::spawn(move || {
let mut v = v;
let panic_countdown = AtomicUsize::new(panic_countdown);
v.$func(|a, b| {
if panic_countdown.fetch_sub(1, Relaxed) == 1 {
SILENCE_PANIC.with(|s| s.set(true));
panic!();
}
a.cmp(b)
})
}).join();
// Check that the number of things dropped is exactly
// what we expect (i.e. the contents of `v`).
for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
let count = c.load(Relaxed);
assert!(count == 1,
"found drop count == {} for i == {}, len == {}",
count, i, len);
}
// Check that the most recent versions of values were dropped.
assert_eq!(VERSIONS.load(Relaxed), 0);
if panic_countdown < step {
break;
}
panic_countdown -= step;
}
}
}
thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
fn main() {
let prev = panic::take_hook();
panic::set_hook(Box::new(move |info| {
if !SILENCE_PANIC.with(|s| s.get()) {
prev(info);
}
}));
for &len in &[1, 2, 3, 4, 5, 10, 20, 100, 500, 5_000, 20_000] {
for &modulus in &[5, 30, 1_000, 20_000] {
for &has_runs in &[false, true] {
let mut rng = thread_rng();
let mut input = (0..len)
.map(|id| {
DropCounter {
x: rng.next_u32() % modulus,
id: id,
version: Cell::new(0),
}
})
.collect::<Vec<_>>();
if has_runs {
for c in &mut input {
c.x = c.id as u32;
}
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
input[a..b].reverse();
} else {
input.swap(a, b);
}
}
}
test!(input, par_sort_by);
test!(input, par_sort_unstable_by);
}
}
}
}
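What this test exercises can be stated compactly: if the comparator panics partway through a parallel sort, the panic propagates to the caller and the slice is left with all of its elements intact (in some order), none leaked or double-dropped. A standalone sketch of that property, using an assumed sentinel value instead of the drop-counting machinery above:

```rust
extern crate rayon;
use rayon::prelude::*;
use std::panic;

fn main() {
    let mut v: Vec<i32> = (0..1_000).rev().collect();
    let result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
        v.par_sort_unstable_by(|a, b| {
            // Panic once the sentinel value participates in a comparison.
            if *a == 500 || *b == 500 {
                panic!("comparator failure");
            }
            a.cmp(b)
        });
    }));
    // The panic is propagated to the caller of the sort...
    assert!(result.is_err());
    // ...and the vector still owns all 1_000 elements.
    assert_eq!(v.len(), 1_000);
}
```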

10
toolkit/library/gtest/rust/Cargo.lock generated

@ -1106,7 +1106,7 @@ dependencies = [
[[package]]
name = "rayon"
version = "0.8.1"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1321,7 +1321,7 @@ dependencies = [
"parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"pdqsort 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"selectors 0.19.0",
"servo_arc 0.0.1",
@ -1602,7 +1602,7 @@ dependencies = [
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"plane-split 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"webrender_api 0.52.0",
@ -1634,7 +1634,7 @@ dependencies = [
"bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)",
"gleam 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"webrender 0.52.0",
"webrender_api 0.52.0",
@ -1770,7 +1770,7 @@ dependencies = [
"checksum quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9e25fa23c044c1803f43ca59c98dac608976dd04ce799411edd58ece776d4"
"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "705cf28d52a26a9ab548930a9a3d9799eb77cf84d66d7cc6e52fa222ca662424"
"checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
"checksum rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"
"checksum redox_syscall 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "8dd35cc9a8bdec562c757e3d43c1526b5c6d2653e23e2315065bc25556550753"
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"

10
toolkit/library/rust/Cargo.lock generated

@ -1094,7 +1094,7 @@ dependencies = [
[[package]]
name = "rayon"
version = "0.8.1"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1313,7 +1313,7 @@ dependencies = [
"parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"pdqsort 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"selectors 0.19.0",
"servo_arc 0.0.1",
@ -1614,7 +1614,7 @@ dependencies = [
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"plane-split 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"webrender_api 0.52.0",
@ -1646,7 +1646,7 @@ dependencies = [
"bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)",
"gleam 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"webrender 0.52.0",
"webrender_api 0.52.0",
@ -1782,7 +1782,7 @@ dependencies = [
"checksum quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9e25fa23c044c1803f43ca59c98dac608976dd04ce799411edd58ece776d4"
"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum rayon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "705cf28d52a26a9ab548930a9a3d9799eb77cf84d66d7cc6e52fa222ca662424"
"checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
"checksum rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"
"checksum redox_syscall 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "8dd35cc9a8bdec562c757e3d43c1526b5c6d2653e23e2315065bc25556550753"
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"