Bug 1517072 - Update webrender to commit 7fc05244e1400acfde1d0a0a4a564e47dc2ef998 (WR PR #3455). r=kats

https://github.com/servo/webrender/pull/3455

Differential Revision: https://phabricator.services.mozilla.com/D15632

--HG--
extra : moz-landing-system : lando
This commit is contained in:
WR Updater Bot 2019-01-03 03:57:27 +00:00
Parent e5ecdc6d96
Commit c36fd12499
8 changed files with 483 additions and 374 deletions

View file

@ -1 +1 @@
a970cbaa88a7516758046c8be016c34627d31355
7fc05244e1400acfde1d0a0a4a564e47dc2ef998

View file

@ -20,7 +20,7 @@ use prim_store::{VisibleGradientTile, PrimitiveInstance, PrimitiveOpacity, Segme
use prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex};
use prim_store::image::ImageSource;
use render_backend::FrameResources;
use render_task::{RenderTaskAddress, RenderTaskId, RenderTaskTree};
use render_task::{RenderTaskAddress, RenderTaskId, RenderTaskTree, TileBlit};
use renderer::{BlendMode, ImageBufferKind, ShaderColorMode};
use renderer::BLOCKS_PER_UV_RECT;
use resource_cache::{CacheItem, GlyphFetchResult, ImageRequest, ResourceCache, ImageProperties};
@ -284,10 +284,16 @@ impl OpaqueBatchList {
pub struct BatchList {
pub alpha_batch_list: AlphaBatchList,
pub opaque_batch_list: OpaqueBatchList,
pub scissor_rect: Option<DeviceIntRect>,
pub tile_blits: Vec<TileBlit>,
}
impl BatchList {
pub fn new(screen_size: DeviceIntSize) -> Self {
pub fn new(
screen_size: DeviceIntSize,
scissor_rect: Option<DeviceIntRect>,
tile_blits: Vec<TileBlit>,
) -> Self {
// The threshold for creating a new batch is
// one quarter the screen size.
let batch_area_threshold = (screen_size.width * screen_size.height) as f32 / 4.0;
@ -295,6 +301,8 @@ impl BatchList {
BatchList {
alpha_batch_list: AlphaBatchList::new(),
opaque_batch_list: OpaqueBatchList::new(batch_area_threshold),
scissor_rect,
tile_blits,
}
}
@ -373,20 +381,29 @@ impl PrimitiveBatch {
pub struct AlphaBatchContainer {
pub opaque_batches: Vec<PrimitiveBatch>,
pub alpha_batches: Vec<PrimitiveBatch>,
pub target_rect: Option<DeviceIntRect>,
pub scissor_rect: Option<DeviceIntRect>,
pub tile_blits: Vec<TileBlit>,
}
impl AlphaBatchContainer {
pub fn new(target_rect: Option<DeviceIntRect>) -> AlphaBatchContainer {
pub fn new(
scissor_rect: Option<DeviceIntRect>,
) -> AlphaBatchContainer {
AlphaBatchContainer {
opaque_batches: Vec::new(),
alpha_batches: Vec::new(),
target_rect,
scissor_rect,
tile_blits: Vec::new(),
}
}
fn merge(&mut self, builder: AlphaBatchBuilder) {
for other_batch in builder.batch_list.opaque_batch_list.batches {
pub fn is_empty(&self) -> bool {
self.opaque_batches.is_empty() &&
self.alpha_batches.is_empty()
}
fn merge(&mut self, batch_list: BatchList) {
for other_batch in batch_list.opaque_batch_list.batches {
let batch_index = self.opaque_batches.iter().position(|batch| {
batch.key.is_compatible_with(&other_batch.key)
});
@ -403,7 +420,7 @@ impl AlphaBatchContainer {
let mut min_batch_index = 0;
for other_batch in builder.batch_list.alpha_batch_list.batches {
for other_batch in batch_list.alpha_batch_list.batches {
let batch_index = self.alpha_batches.iter().skip(min_batch_index).position(|batch| {
batch.key.is_compatible_with(&other_batch.key)
});
@ -433,38 +450,85 @@ struct SegmentInstanceData {
/// Encapsulates the logic of building batches for items that are blended.
pub struct AlphaBatchBuilder {
pub batch_list: BatchList,
pub batch_lists: Vec<BatchList>,
screen_size: DeviceIntSize,
scissor_rect: Option<DeviceIntRect>,
glyph_fetch_buffer: Vec<GlyphFetchResult>,
target_rect: DeviceIntRect,
can_merge: bool,
}
impl AlphaBatchBuilder {
pub fn new(
screen_size: DeviceIntSize,
target_rect: DeviceIntRect,
can_merge: bool,
scissor_rect: Option<DeviceIntRect>,
) -> Self {
let batch_lists = vec![
BatchList::new(
screen_size,
scissor_rect,
Vec::new(),
),
];
AlphaBatchBuilder {
batch_list: BatchList::new(screen_size),
batch_lists,
scissor_rect,
screen_size,
glyph_fetch_buffer: Vec::new(),
target_rect,
can_merge,
}
}
pub fn build(mut self, merged_batches: &mut AlphaBatchContainer) -> Option<AlphaBatchContainer> {
self.batch_list.finalize();
fn push_new_batch_list(
&mut self,
scissor_rect: Option<DeviceIntRect>,
tile_blits: Vec<TileBlit>,
) {
let scissor_rect = match (scissor_rect, self.scissor_rect) {
(Some(rect0), Some(rect1)) => {
Some(rect0.intersection(&rect1).unwrap_or(DeviceIntRect::zero()))
}
(Some(rect0), None) => Some(rect0),
(None, Some(rect1)) => Some(rect1),
(None, None) => None,
};
if self.can_merge {
merged_batches.merge(self);
None
self.batch_lists.push(BatchList::new(
self.screen_size,
scissor_rect,
tile_blits,
));
}
fn current_batch_list(&mut self) -> &mut BatchList {
self.batch_lists.last_mut().unwrap()
}
fn can_merge(&self) -> bool {
self.scissor_rect.is_none() &&
self.batch_lists.len() == 1
}
pub fn build(
mut self,
batch_containers: &mut Vec<AlphaBatchContainer>,
merged_batches: &mut AlphaBatchContainer,
) {
for batch_list in &mut self.batch_lists {
batch_list.finalize();
}
if self.can_merge() {
let batch_list = self.batch_lists.pop().unwrap();
debug_assert!(batch_list.tile_blits.is_empty());
merged_batches.merge(batch_list);
} else {
Some(AlphaBatchContainer {
alpha_batches: self.batch_list.alpha_batch_list.batches,
opaque_batches: self.batch_list.opaque_batch_list.batches,
target_rect: Some(self.target_rect),
})
for batch_list in self.batch_lists {
batch_containers.push(AlphaBatchContainer {
alpha_batches: batch_list.alpha_batch_list.batches,
opaque_batches: batch_list.opaque_batch_list.batches,
scissor_rect: batch_list.scissor_rect,
tile_blits: batch_list.tile_blits,
});
}
}
}
@ -595,7 +659,7 @@ impl AlphaBatchBuilder {
user_data: 0,
});
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
batch_key,
bounding_rect,
z_id,
@ -685,7 +749,7 @@ impl AlphaBatchBuilder {
// frames and display lists.
let prim_data = &ctx.resources.text_run_data_store[data_handle];
let glyph_fetch_buffer = &mut self.glyph_fetch_buffer;
let alpha_batch_list = &mut self.batch_list.alpha_batch_list;
let alpha_batch_list = &mut self.batch_lists.last_mut().unwrap().alpha_batch_list;
let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
let prim_header = PrimitiveHeader {
@ -865,7 +929,7 @@ impl AlphaBatchBuilder {
user_data: segment_user_data,
});
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
batch_key,
bounding_rect,
z_id,
@ -959,7 +1023,7 @@ impl AlphaBatchBuilder {
z_id,
);
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
key,
&prim_instance.bounding_rect.as_ref().expect("bug"),
z_id,
@ -978,105 +1042,124 @@ impl AlphaBatchBuilder {
Some(ref raster_config) => {
match raster_config.composite_mode {
PictureCompositeMode::TileCache { .. } => {
// Construct a local clip rect that ensures we only draw pixels where
// the local bounds of the picture extend to within the edge tiles.
let local_clip_rect = prim_instance
.combined_local_clip_rect
.intersection(&picture.local_rect)
.and_then(|rect| {
rect.intersection(&picture.local_clip_rect)
});
// Step through each tile in the cache, and draw it with an image
// brush primitive if visible.
if let Some(local_clip_rect) = local_clip_rect {
// Step through each tile in the cache, and draw it with an image
// brush primitive if visible.
let kind = BatchKind::Brush(
BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
);
let tile_cache = picture.tile_cache.as_ref().unwrap();
// If there is a dirty rect for the tile cache, recurse into the
// main picture primitive list, and draw them first.
if let Some(_) = tile_cache.dirty_region {
self.add_pic_to_batch(
picture,
task_id,
ctx,
gpu_cache,
render_tasks,
deferred_resolves,
prim_headers,
transforms,
root_spatial_node_index,
z_generator,
);
}
// After drawing the dirty rect, now draw any of the valid tiles that
// will make up the rest of the scene.
// Generate a new z id for the tiles, that will place them *after*
// any opaque overdraw from the dirty rect above.
// TODO(gw): We should remove this hack, and also remove some
// (potential opaque) overdraw by adding support for
// setting a scissor rect for the dirty rect above.
let tile_zid = z_generator.next();
for tile_index in &tile_cache.tiles_to_draw {
let tile = &tile_cache.tiles[tile_index.0];
// Get the local rect of the tile.
let tile_rect = tile.local_rect;
// Construct a local clip rect that ensures we only draw pixels where
// the local bounds of the picture extend to within the edge tiles.
let local_clip_rect = prim_instance
.combined_local_clip_rect
.intersection(&picture.local_rect)
.expect("bug: invalid picture local rect");
let prim_header = PrimitiveHeader {
local_rect: tile_rect,
local_clip_rect,
task_address,
specific_prim_address: prim_cache_address,
clip_task_address,
transform_id,
};
let prim_header_index = prim_headers.push(&prim_header, tile_zid, [
ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
RasterizationSpace::Local as i32,
get_shader_opacity(1.0),
]);
let cache_item = ctx
.resource_cache
.get_texture_cache_item(&tile.handle);
let key = BatchKey::new(
kind,
BlendMode::None,
BatchTextures::color(cache_item.texture_id),
let kind = BatchKind::Brush(
BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
);
let uv_rect_address = gpu_cache
.get_address(&cache_item.uv_rect_handle)
.as_int();
let tile_cache = picture.tile_cache.as_ref().unwrap();
let instance = BrushInstance {
prim_header_index,
clip_task_address,
segment_index: INVALID_SEGMENT_INDEX,
edge_flags: EdgeAaSegmentMask::empty(),
brush_flags: BrushFlags::empty(),
user_data: uv_rect_address,
};
for tile_index in &tile_cache.tiles_to_draw {
let tile = &tile_cache.tiles[tile_index.0];
// Instead of retrieving the batch once and adding each tile instance,
// use this API to get an appropriate batch for each tile, since
// the batch textures may be different. The batch list internally
// caches the current batch if the key hasn't changed.
let batch = self.batch_list.set_params_and_get_batch(
key,
bounding_rect,
tile_zid,
);
// Get the local rect of the tile.
let tile_rect = tile.local_rect;
batch.push(PrimitiveInstanceData::from(instance));
let prim_header = PrimitiveHeader {
local_rect: tile_rect,
local_clip_rect,
task_address,
specific_prim_address: prim_cache_address,
clip_task_address,
transform_id,
};
let prim_header_index = prim_headers.push(&prim_header, z_id, [
ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
RasterizationSpace::Local as i32,
get_shader_opacity(1.0),
]);
let cache_item = ctx
.resource_cache
.get_texture_cache_item(&tile.handle);
let key = BatchKey::new(
kind,
BlendMode::None,
BatchTextures::color(cache_item.texture_id),
);
let uv_rect_address = gpu_cache
.get_address(&cache_item.uv_rect_handle)
.as_int();
let instance = BrushInstance {
prim_header_index,
clip_task_address,
segment_index: INVALID_SEGMENT_INDEX,
edge_flags: EdgeAaSegmentMask::empty(),
brush_flags: BrushFlags::empty(),
user_data: uv_rect_address,
};
// Instead of retrieving the batch once and adding each tile instance,
// use this API to get an appropriate batch for each tile, since
// the batch textures may be different. The batch list internally
// caches the current batch if the key hasn't changed.
let batch = self.current_batch_list().set_params_and_get_batch(
key,
bounding_rect,
z_id,
);
batch.push(PrimitiveInstanceData::from(instance));
}
// If there is a dirty rect for the tile cache, recurse into the
// main picture primitive list, and draw them first.
if let Some(ref dirty_region) = tile_cache.dirty_region {
let mut tile_blits = Vec::new();
let (target_rect, _) = render_tasks[task_id].get_target_rect();
for blit in &tile_cache.pending_blits {
tile_blits.push(TileBlit {
dest_offset: blit.dest_offset,
size: blit.size,
target: blit.target.clone(),
src_offset: DeviceIntPoint::new(
blit.src_offset.x + target_rect.origin.x,
blit.src_offset.y + target_rect.origin.y,
),
})
}
self.push_new_batch_list(
Some(dirty_region.dirty_device_rect),
tile_blits,
);
self.add_pic_to_batch(
picture,
task_id,
ctx,
gpu_cache,
render_tasks,
deferred_resolves,
prim_headers,
transforms,
root_spatial_node_index,
z_generator,
);
self.push_new_batch_list(
None,
Vec::new(),
);
}
}
}
PictureCompositeMode::Filter(filter) => {
@ -1116,7 +1199,7 @@ impl AlphaBatchBuilder {
user_data: uv_rect_address.as_int(),
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
key,
bounding_rect,
z_id,
@ -1204,14 +1287,14 @@ impl AlphaBatchBuilder {
user_data: content_uv_rect_address,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
shadow_key,
bounding_rect,
z_id_shadow,
PrimitiveInstanceData::from(shadow_instance),
);
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
content_key,
bounding_rect,
z_id_content,
@ -1289,7 +1372,7 @@ impl AlphaBatchBuilder {
user_data: 0,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
key,
bounding_rect,
z_id,
@ -1334,7 +1417,7 @@ impl AlphaBatchBuilder {
user_data: 0,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
key,
bounding_rect,
z_id,
@ -1374,7 +1457,7 @@ impl AlphaBatchBuilder {
user_data: uv_rect_address,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
key,
bounding_rect,
z_id,
@ -1854,7 +1937,7 @@ impl AlphaBatchBuilder {
bounding_rect,
clip_task_address,
gpu_cache,
&mut self.batch_list,
self.current_batch_list(),
&prim_header,
prim_headers,
z_id,
@ -1935,7 +2018,7 @@ impl AlphaBatchBuilder {
bounding_rect,
clip_task_address,
gpu_cache,
&mut self.batch_list,
self.current_batch_list(),
&prim_header,
prim_headers,
z_id,
@ -1971,7 +2054,7 @@ impl AlphaBatchBuilder {
kind: BatchKind::Brush(batch_kind),
textures,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
batch_key,
bounding_rect,
z_id,
@ -2031,7 +2114,7 @@ impl AlphaBatchBuilder {
textures: segment_data.textures,
};
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
batch_key,
bounding_rect,
z_id,
@ -2123,7 +2206,7 @@ impl AlphaBatchBuilder {
prim_header_index,
user_data: segment_data.user_data,
});
self.batch_list.push_single_instance(
self.current_batch_list().push_single_instance(
batch_key,
bounding_rect,
z_id,

View file

@ -339,11 +339,6 @@ impl FrameBuilder {
.surfaces[ROOT_SURFACE_INDEX.0]
.take_render_tasks();
let tile_blits = mem::replace(
&mut frame_state.surfaces[ROOT_SURFACE_INDEX.0].tile_blits,
Vec::new(),
);
let root_render_task = RenderTask::new_picture(
RenderTaskLocation::Fixed(self.screen_rect.to_i32()),
self.screen_rect.size.to_f32(),
@ -353,7 +348,6 @@ impl FrameBuilder {
UvRectKind::Rect,
root_spatial_node_index,
None,
tile_blits,
);
let render_task_id = frame_state.render_tasks.add(root_render_task);

View file

@ -4,7 +4,7 @@
use api::{DeviceRect, FilterOp, MixBlendMode, PipelineId, PremultipliedColorF, PictureRect, PicturePoint, WorldPoint};
use api::{DeviceIntRect, DevicePoint, LayoutRect, PictureToRasterTransform, LayoutPixel, PropertyBinding, PropertyBindingId};
use api::{DevicePixelScale, RasterRect, RasterSpace, ColorF, ImageKey, DirtyRect, WorldSize};
use api::{DevicePixelScale, RasterRect, RasterSpace, ColorF, ImageKey, DirtyRect, WorldSize, LayoutSize};
use api::{PicturePixel, RasterPixel, WorldPixel, WorldRect, ImageFormat, ImageDescriptor, WorldVector2D};
use box_shadow::{BLUR_SAMPLE_SCALE};
use clip::{ClipNodeCollector, ClipStore, ClipChainId, ClipChainNode};
@ -19,7 +19,7 @@ use gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use gpu_types::{TransformPalette, TransformPaletteId, UvRectKind};
use plane_split::{Clipper, Polygon, Splitter};
use prim_store::{PictureIndex, PrimitiveInstance, SpaceMapper, VisibleFace, PrimitiveInstanceKind};
use prim_store::{get_raster_rects, CoordinateSpaceMapping};
use prim_store::{get_raster_rects, CoordinateSpaceMapping, VectorKey};
use prim_store::{OpacityBindingStorage, ImageInstanceStorage, OpacityBindingIndex};
use print_tree::PrintTreePrinter;
use render_backend::FrameResources;
@ -166,6 +166,11 @@ pub struct TileDescriptor {
/// to uniquely describe the content of the clip node.
clip_uids: ComparableVec<ItemUid>,
/// List of tile relative offsets of the clip node origins. This
/// ensures that if a clip node is supplied but has a different
/// transform between frames that the tile is invalidated.
clip_vertices: ComparableVec<VectorKey>,
/// List of image keys that this tile depends on.
image_keys: ComparableVec<ImageKey>,
@ -179,6 +184,7 @@ impl TileDescriptor {
TileDescriptor {
prims: ComparableVec::new(),
clip_uids: ComparableVec::new(),
clip_vertices: ComparableVec::new(),
opacity_bindings: ComparableVec::new(),
image_keys: ComparableVec::new(),
}
@ -189,6 +195,7 @@ impl TileDescriptor {
fn clear(&mut self) {
self.prims.reset();
self.clip_uids.reset();
self.clip_vertices.reset();
self.opacity_bindings.reset();
self.image_keys.reset();
}
@ -198,6 +205,7 @@ impl TileDescriptor {
self.image_keys.is_valid() &&
self.opacity_bindings.is_valid() &&
self.clip_uids.is_valid() &&
self.clip_vertices.is_valid() &&
self.prims.is_valid()
}
}
@ -207,7 +215,8 @@ impl TileDescriptor {
/// regions.
#[derive(Debug)]
pub struct DirtyRegion {
dirty_world_rect: WorldRect,
pub dirty_world_rect: WorldRect,
pub dirty_device_rect: DeviceIntRect,
}
/// Represents a cache of tiles that make up a picture primitives.
@ -244,7 +253,7 @@ pub struct TileCache {
/// a new scene arrives.
scroll_offset: Option<WorldVector2D>,
/// A list of blits from the framebuffer to be applied during this frame.
pending_blits: Vec<TileBlit>,
pub pending_blits: Vec<TileBlit>,
/// Collects the clips that apply to this surface.
clip_node_collector: ClipNodeCollector,
}
@ -614,8 +623,8 @@ impl TileCache {
// Build the list of resources that this primitive has dependencies on.
let mut opacity_bindings: SmallVec<[PropertyBindingId; 4]> = SmallVec::new();
let mut clip_chain_spatial_nodes: SmallVec<[SpatialNodeIndex; 8]> = SmallVec::new();
let mut clip_chain_uids: SmallVec<[ItemUid; 8]> = SmallVec::new();
let mut clip_vertices: SmallVec<[WorldPoint; 8]> = SmallVec::new();
let mut image_keys: SmallVec<[ImageKey; 8]> = SmallVec::new();
let mut current_clip_chain_id = prim_instance.clip_chain_id;
@ -686,7 +695,25 @@ impl TileCache {
if clip_chain_node.spatial_node_index < self.spatial_node_index {
self.clip_node_collector.insert(current_clip_chain_id)
} else {
clip_chain_spatial_nodes.push(clip_chain_node.spatial_node_index);
// TODO(gw): Constructing a rect here rather than mapping a point
// is wasteful. We can optimize this by extending the
// SpaceMapper struct to support mapping a point.
let local_rect = LayoutRect::new(
clip_chain_node.local_pos,
LayoutSize::zero(),
);
self.map_local_to_world.set_target_spatial_node(
clip_chain_node.spatial_node_index,
clip_scroll_tree,
);
let clip_world_rect = self
.map_local_to_world
.map(&local_rect)
.expect("bug: unable to map clip rect to world");
clip_vertices.push(clip_world_rect.origin);
clip_chain_uids.push(clip_chain_node.handle.uid());
}
current_clip_chain_id = clip_chain_node.parent_clip_chain_id;
@ -735,6 +762,17 @@ impl TileCache {
clip_count: clip_chain_uids.len() as u16,
});
tile.descriptor.clip_uids.extend_from_slice(&clip_chain_uids);
// Store tile relative clip vertices.
// TODO(gw): We might need to quantize these to avoid
// invalidations due to FP accuracy.
for clip_vertex in &clip_vertices {
let clip_vertex = VectorKey {
x: clip_vertex.x - tile.world_rect.origin.x,
y: clip_vertex.y - tile.world_rect.origin.y,
};
tile.descriptor.clip_vertices.push(clip_vertex);
}
}
}
}
@ -749,9 +787,12 @@ impl TileCache {
clip_store: &ClipStore,
frame_context: &FrameBuildingContext,
resources: &FrameResources,
) {
) -> LayoutRect {
let mut dirty_world_rect = WorldRect::zero();
self.dirty_region = None;
self.pending_blits.clear();
let descriptor = ImageDescriptor::new(
TILE_SIZE_WIDTH,
TILE_SIZE_HEIGHT,
@ -770,12 +811,23 @@ impl TileCache {
frame_context.clip_scroll_tree,
) {
Some(clip_rect) => clip_rect,
None => return,
None => return LayoutRect::zero(),
};
let map_surface_to_world: SpaceMapper<LayoutPixel, WorldPixel> = SpaceMapper::new_with_target(
ROOT_SPATIAL_NODE_INDEX,
self.spatial_node_index,
frame_context.screen_world_rect,
frame_context.clip_scroll_tree,
);
let local_clip_rect = map_surface_to_world
.unmap(&clip_rect)
.expect("bug: unable to map local clip rect");
let clip_rect = match clip_rect.intersection(&frame_context.screen_world_rect) {
Some(clip_rect) => clip_rect,
None => return,
None => return LayoutRect::zero(),
};
let clipped = (clip_rect * frame_context.device_pixel_scale).round().to_i32();
@ -872,10 +924,14 @@ impl TileCache {
self.dirty_region = if dirty_world_rect.is_empty() {
None
} else {
let dirty_device_rect = (dirty_world_rect * frame_context.device_pixel_scale).round().to_i32();
Some(DirtyRegion {
dirty_world_rect,
dirty_device_rect,
})
};
local_clip_rect
}
}
@ -987,8 +1043,6 @@ pub struct SurfaceInfo {
pub tasks: Vec<RenderTaskId>,
/// How much the local surface rect should be inflated (for blur radii).
pub inflation_factor: f32,
/// A list of tile blits to be done after drawing this surface.
pub tile_blits: Vec<TileBlit>,
}
impl SurfaceInfo {
@ -1023,7 +1077,6 @@ impl SurfaceInfo {
surface_spatial_node_index,
tasks: Vec::new(),
inflation_factor,
tile_blits: Vec::new(),
}
}
@ -1533,7 +1586,12 @@ impl PicturePrimitive {
}
};
if self.raster_config.is_some() {
// Don't bother pushing a clip node collector for a tile cache, it's not
// actually an off-screen surface.
// TODO(gw): The way this is handled via the picture composite mode is not
// ideal - we should fix this up and then be able to remove hacks
// like this.
if self.raster_config.is_some() && self.tile_cache.is_none() {
frame_state.clip_store
.push_surface(surface_spatial_node_index);
}
@ -1627,6 +1685,13 @@ impl PicturePrimitive {
self.prim_list = prim_list;
self.state = Some((state, context));
// Don't bother popping a clip node collector for a tile cache, it's not
// actually an off-screen surface (see comment when pushing surface for
// more information).
if self.tile_cache.is_some() {
return None;
}
self.raster_config.as_ref().map(|_| {
frame_state.clip_store.pop_surface()
})
@ -2074,13 +2139,10 @@ impl PicturePrimitive {
let surface = match raster_config.composite_mode {
PictureCompositeMode::TileCache { .. } => {
let tile_cache = self.tile_cache.as_mut().unwrap();
// For a picture surface, just push any child tasks and tile
// blits up to the parent surface.
let surface = &mut surfaces[surface_index.0];
surface.tasks.extend(child_tasks);
surface.tile_blits.extend(tile_cache.pending_blits.drain(..));
return true;
}
@ -2145,7 +2207,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
let picture_task_id = frame_state.render_tasks.add(picture_task);
@ -2203,7 +2264,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
let picture_task_id = render_tasks.add(picture_task);
@ -2261,7 +2321,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
picture_task.mark_for_saving();
@ -2328,7 +2387,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
let readback_task_id = frame_state.render_tasks.add(
@ -2368,7 +2426,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
let render_task_id = frame_state.render_tasks.add(picture_task);
@ -2400,7 +2457,6 @@ impl PicturePrimitive {
uv_rect_kind,
pic_context.raster_spatial_node_index,
None,
Vec::new(),
);
let render_task_id = frame_state.render_tasks.add(picture_task);

View file

@ -505,8 +505,8 @@ impl SizeKey {
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Copy, Debug, Clone, PartialEq)]
pub struct VectorKey {
x: f32,
y: f32,
pub x: f32,
pub y: f32,
}
impl Eq for VectorKey {}
@ -1731,7 +1731,7 @@ impl PrimitiveStore {
let mut tile_cache = state.tile_cache.take().unwrap();
// Build the dirty region(s) for this tile cache.
tile_cache.post_update(
pic.local_clip_rect = tile_cache.post_update(
resource_cache,
gpu_cache,
clip_store,
@ -3248,6 +3248,11 @@ pub fn get_raster_rects(
device_pixel_scale,
);
// Ensure that we won't try to allocate a zero-sized clip render task.
if clipped.is_empty() {
return None;
}
Some((clipped.to_i32(), unclipped))
}

View file

@ -270,7 +270,6 @@ pub struct PictureTask {
pub uv_rect_handle: GpuCacheHandle,
pub root_spatial_node_index: SpatialNodeIndex,
uv_rect_kind: UvRectKind,
pub blits: Vec<TileBlit>,
}
#[derive(Debug)]
@ -434,7 +433,6 @@ impl RenderTask {
uv_rect_kind: UvRectKind,
root_spatial_node_index: SpatialNodeIndex,
clear_color: Option<ColorF>,
blits: Vec<TileBlit>,
) -> Self {
let size = match location {
RenderTaskLocation::Dynamic(_, size) => size,
@ -462,7 +460,6 @@ impl RenderTask {
uv_rect_handle: GpuCacheHandle::new(),
uv_rect_kind,
root_spatial_node_index,
blits,
}),
clear_mode,
saved_index: None,

View file

@ -3280,32 +3280,30 @@ impl Renderer {
self.handle_scaling(&target.scalings, TextureSource::PrevPassColor, projection, stats);
//TODO: record the pixel count for cached primitives
for alpha_batch_container in &target.alpha_batch_containers {
if let Some(scissor_rect) = alpha_batch_container.scissor_rect {
// Note: `framebuffer_target_rect` needs a Y-flip before going to GL
let rect = if draw_target.is_default() {
let mut rect = scissor_rect
.intersection(&framebuffer_target_rect.to_i32())
.unwrap_or(DeviceIntRect::zero());
rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
rect
} else {
scissor_rect
};
self.device.enable_scissor();
self.device.set_scissor_rect(rect);
}
if target.needs_depth() {
let _gl = self.gpu_profile.start_marker("opaque batches");
let opaque_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
self.set_blend(false, framebuffer_kind);
//Note: depth equality is needed for split planes
self.device.set_depth_func(DepthFunction::LessEqual);
self.device.enable_depth();
self.device.enable_depth_write();
for alpha_batch_container in &target.alpha_batch_containers {
if let Some(target_rect) = alpha_batch_container.target_rect {
// Note: `framebuffer_target_rect` needs a Y-flip before going to GL
let rect = if draw_target.is_default() {
let mut rect = target_rect
.intersection(&framebuffer_target_rect.to_i32())
.unwrap_or(DeviceIntRect::zero());
rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
rect
} else {
target_rect
};
self.device.enable_scissor();
self.device.set_scissor_rect(rect);
}
if !alpha_batch_container.opaque_batches.is_empty() {
let _gl = self.gpu_profile.start_marker("opaque batches");
let opaque_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
self.set_blend(false, framebuffer_kind);
//Note: depth equality is needed for split planes
self.device.set_depth_func(DepthFunction::LessEqual);
self.device.enable_depth();
self.device.enable_depth_write();
// Draw opaque batches front-to-back for maximum
// z-buffer efficiency!
@ -3330,132 +3328,160 @@ impl Renderer {
);
}
if alpha_batch_container.target_rect.is_some() {
self.device.disable_scissor();
}
self.device.disable_depth_write();
self.gpu_profile.finish_sampler(opaque_sampler);
}
self.device.disable_depth_write();
self.gpu_profile.finish_sampler(opaque_sampler);
}
if !alpha_batch_container.alpha_batches.is_empty() {
let _gl = self.gpu_profile.start_marker("alpha batches");
let transparent_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
self.set_blend(true, framebuffer_kind);
let mut prev_blend_mode = BlendMode::None;
let _gl = self.gpu_profile.start_marker("alpha batches");
let transparent_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
self.set_blend(true, framebuffer_kind);
let mut prev_blend_mode = BlendMode::None;
for batch in &alpha_batch_container.alpha_batches {
self.shaders.borrow_mut()
.get(&batch.key, self.debug_flags)
.bind(
&mut self.device, projection,
&mut self.renderer_errors,
);
for alpha_batch_container in &target.alpha_batch_containers {
if let Some(target_rect) = alpha_batch_container.target_rect {
// Note: `framebuffer_target_rect` needs a Y-flip before going to GL
let rect = if draw_target.is_default() {
let mut rect = target_rect
.intersection(&framebuffer_target_rect.to_i32())
.unwrap_or(DeviceIntRect::zero());
rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
rect
} else {
target_rect
};
self.device.enable_scissor();
self.device.set_scissor_rect(rect);
}
for batch in &alpha_batch_container.alpha_batches {
self.shaders.borrow_mut()
.get(&batch.key, self.debug_flags)
.bind(
&mut self.device, projection,
&mut self.renderer_errors,
);
if batch.key.blend_mode != prev_blend_mode {
match batch.key.blend_mode {
_ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
framebuffer_kind == FramebufferKind::Main => {
self.device.set_blend_mode_show_overdraw();
}
BlendMode::None => {
unreachable!("bug: opaque blend in alpha pass");
}
BlendMode::Alpha => {
self.device.set_blend_mode_alpha();
}
BlendMode::PremultipliedAlpha => {
self.device.set_blend_mode_premultiplied_alpha();
}
BlendMode::PremultipliedDestOut => {
self.device.set_blend_mode_premultiplied_dest_out();
}
BlendMode::SubpixelDualSource => {
self.device.set_blend_mode_subpixel_dual_source();
}
BlendMode::SubpixelConstantTextColor(color) => {
self.device.set_blend_mode_subpixel_constant_text_color(color);
}
BlendMode::SubpixelWithBgColor => {
// Using the three pass "component alpha with font smoothing
// background color" rendering technique:
//
// /webrender/doc/text-rendering.md
//
self.device.set_blend_mode_subpixel_with_bg_color_pass0();
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass0 as _);
if batch.key.blend_mode != prev_blend_mode {
match batch.key.blend_mode {
_ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
framebuffer_kind == FramebufferKind::Main => {
self.device.set_blend_mode_show_overdraw();
}
BlendMode::None => {
unreachable!("bug: opaque blend in alpha pass");
}
BlendMode::Alpha => {
self.device.set_blend_mode_alpha();
}
BlendMode::PremultipliedAlpha => {
self.device.set_blend_mode_premultiplied_alpha();
}
BlendMode::PremultipliedDestOut => {
self.device.set_blend_mode_premultiplied_dest_out();
}
BlendMode::SubpixelDualSource => {
self.device.set_blend_mode_subpixel_dual_source();
}
BlendMode::SubpixelConstantTextColor(color) => {
self.device.set_blend_mode_subpixel_constant_text_color(color);
}
BlendMode::SubpixelWithBgColor => {
// Using the three pass "component alpha with font smoothing
// background color" rendering technique:
//
// /webrender/doc/text-rendering.md
//
self.device.set_blend_mode_subpixel_with_bg_color_pass0();
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass0 as _);
}
}
prev_blend_mode = batch.key.blend_mode;
}
prev_blend_mode = batch.key.blend_mode;
}
// Handle special case readback for composites.
if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, source_id, backdrop_id }) = batch.key.kind {
// composites can't be grouped together because
// they may overlap and affect each other.
debug_assert_eq!(batch.instances.len(), 1);
self.handle_readback_composite(
draw_target,
alpha_batch_container.target_rect,
&render_tasks[source_id],
&render_tasks[task_id],
&render_tasks[backdrop_id],
// Handle special case readback for composites.
if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, source_id, backdrop_id }) = batch.key.kind {
// composites can't be grouped together because
// they may overlap and affect each other.
debug_assert_eq!(batch.instances.len(), 1);
self.handle_readback_composite(
draw_target,
alpha_batch_container.scissor_rect,
&render_tasks[source_id],
&render_tasks[task_id],
&render_tasks[backdrop_id],
);
}
let _timer = self.gpu_profile.start_timer(batch.key.kind.sampler_tag());
self.draw_instanced_batch(
&batch.instances,
VertexArrayKind::Primitive,
&batch.key.textures,
stats
);
if batch.key.blend_mode == BlendMode::SubpixelWithBgColor {
self.set_blend_mode_subpixel_with_bg_color_pass1(framebuffer_kind);
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass1 as _);
// When drawing the 2nd and 3rd passes, we know that the VAO, textures etc
// are all set up from the previous draw_instanced_batch call,
// so just issue a draw call here to avoid re-uploading the
// instances and re-binding textures etc.
self.device
.draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
self.set_blend_mode_subpixel_with_bg_color_pass2(framebuffer_kind);
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass2 as _);
self.device
.draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
prev_blend_mode = BlendMode::None;
}
}
let _timer = self.gpu_profile.start_timer(batch.key.kind.sampler_tag());
self.draw_instanced_batch(
&batch.instances,
VertexArrayKind::Primitive,
&batch.key.textures,
stats
);
if batch.key.blend_mode == BlendMode::SubpixelWithBgColor {
self.set_blend_mode_subpixel_with_bg_color_pass1(framebuffer_kind);
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass1 as _);
// When drawing the 2nd and 3rd passes, we know that the VAO, textures etc
// are all set up from the previous draw_instanced_batch call,
// so just issue a draw call here to avoid re-uploading the
// instances and re-binding textures etc.
self.device
.draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
self.set_blend_mode_subpixel_with_bg_color_pass2(framebuffer_kind);
self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass2 as _);
self.device
.draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
prev_blend_mode = BlendMode::None;
}
self.device.disable_depth();
self.set_blend(false, framebuffer_kind);
self.gpu_profile.finish_sampler(transparent_sampler);
}
if alpha_batch_container.target_rect.is_some() {
if alpha_batch_container.scissor_rect.is_some() {
self.device.disable_scissor();
}
}
self.device.disable_depth();
self.set_blend(false, framebuffer_kind);
self.gpu_profile.finish_sampler(transparent_sampler);
// At the end of rendering a container, blit across any cache tiles
// to the texture cache for use on subsequent frames.
if !alpha_batch_container.tile_blits.is_empty() {
let _timer = self.gpu_profile.start_timer(GPU_TAG_BLIT);
self.device.bind_read_target(draw_target.into());
for blit in &alpha_batch_container.tile_blits {
let texture = self.texture_resolver
.resolve(&blit.target.texture_id)
.expect("BUG: invalid target texture");
self.device.bind_draw_target(DrawTarget::Texture {
texture,
layer: blit.target.texture_layer as usize,
with_depth: false,
});
let mut src_rect = DeviceIntRect::new(
blit.src_offset,
blit.size,
);
let target_rect = blit.target.uv_rect.to_i32();
let mut dest_rect = DeviceIntRect::new(
DeviceIntPoint::new(
blit.dest_offset.x + target_rect.origin.x,
blit.dest_offset.y + target_rect.origin.y,
),
blit.size,
);
// Modify the src/dest rects since we are blitting from the framebuffer
src_rect.origin.y = draw_target.dimensions().height as i32 - src_rect.size.height - src_rect.origin.y;
dest_rect.origin.y += dest_rect.size.height;
dest_rect.size.height = -dest_rect.size.height;
self.device.blit_render_target(
src_rect,
dest_rect,
);
}
self.device.bind_draw_target(draw_target);
}
}
// For any registered image outputs on this render target,
// get the texture from caller and blit it.
@ -3492,54 +3518,6 @@ impl Renderer {
handler.unlock(output.pipeline_id);
}
}
// At the end of rendering a target, blit across any cache tiles
// to the texture cache for use on subsequent frames.
if !target.tile_blits.is_empty() {
let _timer = self.gpu_profile.start_timer(GPU_TAG_BLIT);
self.device.bind_read_target(draw_target.into());
for blit in &target.tile_blits {
let texture = self.texture_resolver
.resolve(&blit.target.texture_id)
.expect("BUG: invalid target texture");
self.device.bind_draw_target(DrawTarget::Texture {
texture,
layer: blit.target.texture_layer as usize,
with_depth: false,
});
let mut src_rect = DeviceIntRect::new(
blit.src_offset,
blit.size,
);
let target_rect = blit.target.uv_rect.to_i32();
let mut dest_rect = DeviceIntRect::new(
DeviceIntPoint::new(
blit.dest_offset.x + target_rect.origin.x,
blit.dest_offset.y + target_rect.origin.y,
),
blit.size,
);
// Modify the src/dest rects since we are blitting from the framebuffer
src_rect.origin.y = draw_target.dimensions().height as i32 - src_rect.size.height - src_rect.origin.y;
dest_rect.origin.y += dest_rect.size.height;
dest_rect.size.height = -dest_rect.size.height;
self.device.blit_render_target(
src_rect,
dest_rect,
);
}
self.device.bind_draw_target(draw_target);
}
}
fn draw_alpha_target(

Просмотреть файл

@ -21,7 +21,7 @@ use picture::SurfaceInfo;
use prim_store::{PrimitiveStore, DeferredResolve, PrimitiveScratchBuffer};
use profiler::FrameProfileCounters;
use render_backend::{FrameId, FrameResources};
use render_task::{BlitSource, RenderTaskAddress, RenderTaskId, RenderTaskKind, TileBlit};
use render_task::{BlitSource, RenderTaskAddress, RenderTaskId, RenderTaskKind};
use render_task::{BlurTask, ClearMode, GlyphTask, RenderTaskLocation, RenderTaskTree, ScalingTask};
use resource_cache::ResourceCache;
use std::{cmp, usize, f32, i32, mem};
@ -343,7 +343,6 @@ pub struct ColorRenderTarget {
pub blits: Vec<BlitJob>,
// List of frame buffer outputs for this render target.
pub outputs: Vec<FrameOutput>,
pub tile_blits: Vec<TileBlit>,
pub color_clears: Vec<RenderTaskId>,
alpha_tasks: Vec<RenderTaskId>,
screen_size: DeviceIntSize,
@ -365,7 +364,6 @@ impl RenderTarget for ColorRenderTarget {
outputs: Vec::new(),
alpha_tasks: Vec::new(),
color_clears: Vec::new(),
tile_blits: Vec::new(),
screen_size,
used_rect: DeviceIntRect::zero(),
}
@ -403,10 +401,15 @@ impl RenderTarget for ColorRenderTarget {
let (target_rect, _) = task.get_target_rect();
let scisor_rect = if pic_task.can_merge {
None
} else {
Some(target_rect)
};
let mut batch_builder = AlphaBatchBuilder::new(
self.screen_size,
target_rect,
pic_task.can_merge,
scisor_rect,
);
batch_builder.add_pic_to_batch(
@ -422,21 +425,10 @@ impl RenderTarget for ColorRenderTarget {
z_generator,
);
for blit in &pic_task.blits {
self.tile_blits.push(TileBlit {
dest_offset: blit.dest_offset,
size: blit.size,
target: blit.target.clone(),
src_offset: DeviceIntPoint::new(
blit.src_offset.x + target_rect.origin.x,
blit.src_offset.y + target_rect.origin.y,
),
})
}
if let Some(batch_container) = batch_builder.build(&mut merged_batches) {
self.alpha_batch_containers.push(batch_container);
}
batch_builder.build(
&mut self.alpha_batch_containers,
&mut merged_batches,
);
}
_ => {
unreachable!();
@ -444,7 +436,9 @@ impl RenderTarget for ColorRenderTarget {
}
}
self.alpha_batch_containers.push(merged_batches);
if !merged_batches.is_empty() {
self.alpha_batch_containers.push(merged_batches);
}
}
fn add_task(
@ -552,7 +546,9 @@ impl RenderTarget for ColorRenderTarget {
}
fn must_be_drawn(&self) -> bool {
!self.tile_blits.is_empty()
self.alpha_batch_containers.iter().any(|ab| {
!ab.tile_blits.is_empty()
})
}
fn needs_depth(&self) -> bool {