Bug 1450162. Correctly merge in the presence of reorderings. r=mstange

Previously we assumed that we'd always get the display list in
a canonical ordering. With retained display lists this assumption no longer holds.
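
The fix introduces a CachedReader that tolerates reordering: when the old list's next entry doesn't have the bounds we're looking for, it is parked in a BTreeMap keyed by (bounds, insertion counter) so it can be fetched later. Below is a minimal standalone sketch of that multi-map trick; the names (MultiMap, take_oldest) and the string keys standing in for Box2d bounds are hypothetical, not part of the patch.

use std::collections::BTreeMap;
use std::ops::Bound::Included;

// Sketch of the multi-map technique: append a monotonically increasing
// counter to each key so duplicate keys can coexist, and use a range
// query to pop the oldest entry stored under a given key.
struct MultiMap<K: Ord + Copy, V> {
    map: BTreeMap<(K, u32), V>,
    counter: u32,
}

impl<K: Ord + Copy, V> MultiMap<K, V> {
    fn new() -> Self {
        MultiMap { map: BTreeMap::new(), counter: 0 }
    }

    // Insert under (key, counter); the counter preserves insertion order
    // among entries that share the same key.
    fn insert(&mut self, key: K, value: V) {
        self.map.insert((key, self.counter), value);
        self.counter += 1;
    }

    // Remove and return the oldest value stored under `key`, if any.
    fn take_oldest(&mut self, key: K) -> Option<V> {
        let full_key = match self
            .map
            .range((Included((key, 0)), Included((key, u32::MAX))))
            .next()
        {
            Some((&k, _)) => k,
            None => return None,
        };
        self.map.remove(&full_key)
    }
}

fn main() {
    let mut m = MultiMap::new();
    m.insert("bounds-a", 1);
    m.insert("bounds-a", 2);
    m.insert("bounds-b", 3);
    assert_eq!(m.take_oldest("bounds-a"), Some(1)); // insertion order kept
    assert_eq!(m.take_oldest("bounds-a"), Some(2));
    assert_eq!(m.take_oldest("bounds-a"), None);
}

Appending the counter keeps equal-bounds entries distinct, and BTreeMap's lexicographic key ordering means a range query over (key, 0)..=(key, MAX) yields them in their original order.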
Jeff Muizelaar 2018-04-04 18:46:27 -04:00
Parent eb32c7230c
Commit a3330d16d6
1 changed file with 57 additions and 18 deletions


@@ -4,11 +4,14 @@ use rayon::ThreadPool;
 use std::collections::hash_map::HashMap;
 use std::collections::hash_map;
+use std::collections::btree_map::BTreeMap;
+use std::collections::Bound::Included;
 use std::mem;
 use std::os::raw::c_void;
 use std::ptr;
 use std::sync::mpsc::{channel, Sender, Receiver};
 use std::sync::Arc;
+use std;
 #[cfg(target_os = "windows")]
 use dwrote;
@@ -196,7 +199,7 @@ impl BlobWriter {
 // XXX: Do we want to allow negative values here or clamp to the image bounds?
-#[derive(Debug, Eq, PartialEq, Clone, Copy)]
+#[derive(Debug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd)]
 struct Box2d {
     x1: u32,
     y1: u32,
@@ -242,6 +245,51 @@ fn check_result(result: &[u8]) -> () {
     }
 }
+
+// We use a BTreeMap as a kind of multi-map, by appending an integer "cache_order" to the key.
+// This lets us store multiple items with matching bounds in the map and allows
+// us to fetch and remove them while retaining the ordering of the original list.
+struct CachedReader<'a> {
+    reader: BlobReader<'a>,
+    cache: BTreeMap<(Box2d, u32), Entry>,
+    cache_index_counter: u32,
+}
+
+impl<'a> CachedReader<'a> {
+    fn new(buf: &'a [u8]) -> CachedReader {
+        CachedReader { reader: BlobReader::new(buf), cache: BTreeMap::new(), cache_index_counter: 0 }
+    }
+
+    fn take_entry_with_bounds_from_cache(&mut self, bounds: &Box2d) -> Option<Entry> {
+        if self.cache.is_empty() {
+            return None;
+        }
+        let key_to_delete = match self.cache
+            .range((Included((*bounds, 0u32)), Included((*bounds, std::u32::MAX))))
+            .next() {
+            Some((&key, _)) => key,
+            None => return None,
+        };
+        Some(self.cache.remove(&key_to_delete)
+            .expect("We just got this key from range, it needs to be present"))
+    }
+
+    fn next_entry_with_bounds(&mut self, bounds: &Box2d, ignore_rect: &Box2d) -> Entry {
+        if let Some(entry) = self.take_entry_with_bounds_from_cache(bounds) {
+            return entry;
+        }
+        loop {
+            let old = self.reader.read_entry();
+            if old.bounds == *bounds {
+                return old;
+            } else if !old.bounds.contained_by(ignore_rect) {
+                self.cache.insert((old.bounds, self.cache_index_counter), old);
+                self.cache_index_counter += 1;
+            }
+        }
+    }
+}
+
 /* Merge a new partial blob image into an existing complete blob image.
    All of the items not fully contained in the dirty_rect should match
    in both new and old lists.
@@ -258,7 +306,7 @@ fn merge_blob_images(old_buf: &[u8], new_buf: &[u8], dirty_rect: Box2d) -> Vec<u
     dlog!("new:");
     dump_blob_index(new_buf, dirty_rect);
-    let mut old_reader = BlobReader::new(old_buf);
+    let mut old_reader = CachedReader::new(old_buf);
     let mut new_reader = BlobReader::new(new_buf);

     // Loop over both new and old entries merging them.
@@ -271,29 +319,20 @@ fn merge_blob_images(old_buf: &[u8], new_buf: &[u8], dirty_rect: Box2d) -> Vec<u
         if new.bounds.contained_by(&dirty_rect) {
             result.new_entry(new.extra_end - new.end, new.bounds, &new_buf[new.begin..new.extra_end]);
         } else {
-            loop {
-                assert!(old_reader.reader.has_more());
-                let old = old_reader.read_entry();
-                dlog!("old bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
-                if old.bounds.contained_by(&dirty_rect) {
-                    // fully contained items will be discarded or replaced
-                } else {
-                    assert_eq!(old.bounds, new.bounds);
-                    // we found a matching item; use the old data
-                    result.new_entry(old.extra_end - old.end, old.bounds, &old_buf[old.begin..old.extra_end]);
-                    break;
-                }
-            }
+            let old = old_reader.next_entry_with_bounds(&new.bounds, &dirty_rect);
+            result.new_entry(old.extra_end - old.end, new.bounds, &old_buf[old.begin..old.extra_end]);
         }
     }

-    // Include any remaining old items.
-    while old_reader.reader.has_more() {
-        let old = old_reader.read_entry();
+    // Ensure all remaining old items will be discarded.
+    while old_reader.reader.reader.has_more() {
+        let old = old_reader.reader.read_entry();
         dlog!("old bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
         assert!(old.bounds.contained_by(&dirty_rect));
     }
+    assert!(old_reader.cache.is_empty());

     let result = result.finish();
     check_result(&result);
     result
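
For context, here is a simplified, self-contained model of the merge contract stated in the doc comment above: items fully inside the dirty rect take the freshly painted data, while everything else must appear in both lists (in any order) and keeps its old payload. Bounds, merge, and the payload strings are invented for illustration, and unlike the real code this sketch pre-populates its cache eagerly instead of streaming entries through CachedReader.

use std::collections::BTreeMap;

// One-dimensional stand-in for Box2d: an inclusive u32 range.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Bounds(u32, u32);

impl Bounds {
    fn contained_by(&self, other: &Bounds) -> bool {
        self.0 >= other.0 && self.1 <= other.1
    }
}

fn merge(
    old: &[(Bounds, &'static str)],
    new: &[(Bounds, &'static str)],
    dirty: Bounds,
) -> Vec<(Bounds, &'static str)> {
    // Multi-map of old items outside the dirty rect, keyed by bounds plus
    // an insertion counter so reordered duplicates stay distinguishable.
    let mut cache = BTreeMap::new();
    for (i, &(b, payload)) in old.iter().enumerate() {
        if !b.contained_by(&dirty) {
            cache.insert((b, i as u32), payload);
        }
    }
    new.iter()
        .map(|&(b, payload)| {
            if b.contained_by(&dirty) {
                (b, payload) // dirty items take the newly painted data
            } else {
                // unchanged items must have an old counterpart, in any order
                let key = *cache
                    .range((b, 0)..=(b, u32::MAX))
                    .next()
                    .expect("item outside dirty rect must be in old list")
                    .0;
                (b, cache.remove(&key).unwrap())
            }
        })
        .collect()
}

fn main() {
    let dirty = Bounds(10, 20);
    let old = [(Bounds(0, 5), "old-a"), (Bounds(12, 18), "old-dirty"), (Bounds(30, 40), "old-b")];
    // Retained display lists may reorder items outside the dirty rect.
    let new = [(Bounds(30, 40), "new-b"), (Bounds(12, 18), "new-dirty"), (Bounds(0, 5), "new-a")];
    let merged = merge(&old, &new, dirty);
    assert_eq!(
        merged,
        vec![(Bounds(30, 40), "old-b"), (Bounds(12, 18), "new-dirty"), (Bounds(0, 5), "old-a")]
    );
}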