Backed out changeset 7443389d0205 (bug 1698711) for causing wrench failures. CLOSED TREE

Csoregi Natalia 2021-03-16 20:10:35 +02:00
Parent 30507b25b5
Commit 958b1b7b30
6 changed files: 380 additions and 40 deletions

View file

@@ -184,10 +184,6 @@ void RenderCompositorSWGL::CommitMappedBuffer(bool aDirty) {
mDirtyRegion.SetEmpty();
return;
}
// Force any delayed clears to resolve.
if (aDirty) {
wr_swgl_resolve_framebuffer(mContext, 0);
}
// Clear out the old framebuffer in case something tries to access it after
// the frame.
wr_swgl_init_default_framebuffer(mContext, 0, 0, 0, 0, 0, nullptr);

View file

@@ -1500,22 +1500,27 @@ pub extern "C" fn wr_window_new(
) -> bool {
assert!(unsafe { is_in_render_thread() });
let native_gl = if gl_context == ptr::null_mut() {
None
} else if unsafe { is_glcontext_gles(gl_context) } {
unsafe { Some(gl::GlesFns::load_with(|symbol| get_proc_address(gl_context, symbol))) }
} else {
unsafe { Some(gl::GlFns::load_with(|symbol| get_proc_address(gl_context, symbol))) }
};
let software = swgl_context != ptr::null_mut();
let (gl, sw_gl) = if software {
let ctx = swgl::Context::from(swgl_context);
ctx.make_current();
(Rc::new(ctx) as Rc<dyn gl::Gl>, Some(ctx))
} else {
let gl = unsafe {
if gl_context == ptr::null_mut() {
panic!("Native GL context required when not using SWGL!");
} else if is_glcontext_gles(gl_context) {
gl::GlesFns::load_with(|symbol| get_proc_address(gl_context, symbol))
} else {
gl::GlFns::load_with(|symbol| get_proc_address(gl_context, symbol))
}
};
(gl, None)
(
native_gl
.as_ref()
.expect("Native GL context required when not using SWGL!")
.clone(),
None,
)
};
let version = gl.get_string(gl::VERSION);
@@ -1560,6 +1565,7 @@ pub extern "C" fn wr_window_new(
max_update_rects: 1,
compositor: Box::new(SwCompositor::new(
sw_gl.unwrap(),
native_gl,
Box::new(WrCompositor(compositor)),
use_native_compositor,
)),

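For orientation (illustration only, not part of the diff): after the backout, wr_window_new again accepts three context configurations, which the restored code above selects between. A minimal sketch using placeholder types in place of swgl::Context and Rc<dyn gl::Gl>:

// Illustrative sketch only -- placeholder types, not code from this patch.
struct SwglCtx;  // stands in for swgl::Context
struct NativeGl; // stands in for Rc<dyn gleam::gl::Gl>

enum Backend {
    Hardware(NativeGl),              // native GL only, SWGL disabled
    Software(SwglCtx),               // SWGL only, composited in software
    SoftwareOnGl(SwglCtx, NativeGl), // SWGL rendering, native GL compositing
}

fn pick_backend(swgl: Option<SwglCtx>, native: Option<NativeGl>) -> Backend {
    match (swgl, native) {
        (Some(sw), Some(gl)) => Backend::SoftwareOnGl(sw, gl),
        (Some(sw), None) => Backend::Software(sw),
        (None, Some(gl)) => Backend::Hardware(gl),
        (None, None) => panic!("Native GL context required when not using SWGL!"),
    }
}

The SoftwareOnGl case is what feeds native_gl into SwCompositor::new in the hunk above.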
View file

@@ -39,11 +39,6 @@ pub extern "C" fn wr_swgl_init_default_framebuffer(
swgl::Context::from(ctx).init_default_framebuffer(x, y, width, height, stride, buf);
}
#[no_mangle]
pub extern "C" fn wr_swgl_resolve_framebuffer(ctx: *mut c_void, fbo: u32) {
swgl::Context::from(ctx).resolve_framebuffer(fbo);
}
#[no_mangle]
pub extern "C" fn wr_swgl_gen_texture(ctx: *mut c_void) -> u32 {
swgl::Context::from(ctx).gen_textures(1)[0]

View file

@@ -2247,13 +2247,21 @@ void InitDefaultFramebuffer(int x, int y, int width, int height, int stride,
depthtex.offset = IntPoint(x, y);
}
void ResolveFramebuffer(GLuint fbo) {
void* GetColorBuffer(GLuint fbo, GLboolean flush, int32_t* width,
int32_t* height, int32_t* stride) {
Framebuffer* fb = ctx->framebuffers.find(fbo);
if (!fb || !fb->color_attachment) {
return;
return nullptr;
}
Texture& colortex = ctx->textures[fb->color_attachment];
prepare_texture(colortex);
if (flush) {
prepare_texture(colortex);
}
assert(colortex.offset == IntPoint(0, 0));
*width = colortex.width;
*height = colortex.height;
*stride = colortex.stride();
return colortex.buf ? colortex.sample_ptr(0, 0) : nullptr;
}
void SetTextureBuffer(GLuint texid, GLenum internal_format, GLsizei width,

View file

@@ -235,7 +235,13 @@ extern "C" {
stride: i32,
buf: *mut c_void,
);
fn ResolveFramebuffer(fbo: GLuint);
fn GetColorBuffer(
fbo: GLuint,
flush: GLboolean,
width: *mut i32,
height: *mut i32,
stride: *mut i32,
) -> *mut c_void;
fn SetTextureBuffer(
tex: GLuint,
internal_format: GLenum,
@@ -351,9 +357,19 @@ impl Context {
}
}
pub fn resolve_framebuffer(&self, fbo: GLuint) {
pub fn get_color_buffer(&self, fbo: GLuint, flush: bool) -> (*mut c_void, i32, i32, i32) {
unsafe {
ResolveFramebuffer(fbo);
let mut width: i32 = 0;
let mut height: i32 = 0;
let mut stride: i32 = 0;
let data_ptr = GetColorBuffer(
fbo,
flush as GLboolean,
&mut width,
&mut height,
&mut stride,
);
(data_ptr, width, height, stride)
}
}

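For illustration only, a minimal sketch of how a caller might consume the restored get_color_buffer wrapper; read_back_framebuffer is a hypothetical helper, not code from this commit:

fn read_back_framebuffer(ctx: &swgl::Context) {
    // flush = true forces any delayed clears to resolve before read-back,
    // matching how unmap_tile uses the call later in this patch.
    let (buf, width, height, stride) = ctx.get_color_buffer(0, true);
    if buf.is_null() {
        return; // framebuffer has no color attachment
    }
    assert!(stride % 4 == 0);
    // Borrow the first row of 4-byte pixels without copying.
    let first_row = unsafe { std::slice::from_raw_parts(buf as *const u8, stride as usize) };
    println!("{}x{} framebuffer, {} bytes per row", width, height, first_row.len());
}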
View file

@@ -2,11 +2,13 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use gleam::{gl, gl::Gl};
use gleam::{gl, gl::GLenum, gl::Gl};
use std::cell::{Cell, UnsafeCell};
use std::collections::{hash_map::HashMap, VecDeque};
use std::ops::{Deref, DerefMut, Range};
use std::os::raw::c_void;
use std::ptr;
use std::rc::Rc;
use std::sync::atomic::{AtomicIsize, AtomicPtr, AtomicU32, AtomicU8, Ordering};
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
use std::thread;
@@ -21,6 +23,8 @@ pub struct SwTile {
y: i32,
fbo_id: u32,
color_id: u32,
tex_id: u32,
pbo_id: u32,
dirty_rect: DeviceIntRect,
valid_rect: DeviceIntRect,
/// Composition of tiles must be ordered such that any tiles that may overlap
@@ -43,6 +47,8 @@ impl SwTile {
y,
fbo_id: 0,
color_id: 0,
tex_id: 0,
pbo_id: 0,
dirty_rect: DeviceIntRect::zero(),
valid_rect: DeviceIntRect::zero(),
overlaps: Cell::new(0),
@@ -171,6 +177,172 @@ fn image_rendering_to_gl_filter(filter: ImageRendering) -> gl::GLenum {
}
}
struct DrawTileHelper {
gl: Rc<dyn gl::Gl>,
prog: u32,
quad_vbo: u32,
quad_vao: u32,
dest_matrix_loc: i32,
tex_matrix_loc: i32,
}
impl DrawTileHelper {
fn new(gl: Rc<dyn gl::Gl>) -> Self {
let quad_vbo = gl.gen_buffers(1)[0];
gl.bind_buffer(gl::ARRAY_BUFFER, quad_vbo);
let quad_data: [f32; 8] = [0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0];
gl::buffer_data(&*gl, gl::ARRAY_BUFFER, &quad_data, gl::STATIC_DRAW);
let quad_vao = gl.gen_vertex_arrays(1)[0];
gl.bind_vertex_array(quad_vao);
gl.enable_vertex_attrib_array(0);
gl.vertex_attrib_pointer(0, 2, gl::FLOAT, false, 0, 0);
gl.bind_vertex_array(0);
let version = match gl.get_type() {
gl::GlType::Gl => "#version 150",
gl::GlType::Gles => "#version 300 es",
};
let vert_source = "
in vec2 aVert;
uniform mat3 uDestMatrix;
uniform mat3 uTexMatrix;
out vec2 vTexCoord;
void main(void) {
gl_Position = vec4((uDestMatrix * vec3(aVert, 1.0)).xy, 0.0, 1.0);
vTexCoord = (uTexMatrix * vec3(aVert, 1.0)).xy;
}
";
let vs = gl.create_shader(gl::VERTEX_SHADER);
gl.shader_source(vs, &[version.as_bytes(), vert_source.as_bytes()]);
gl.compile_shader(vs);
let frag_source = "
#ifdef GL_ES
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
#endif
in vec2 vTexCoord;
out vec4 oFragColor;
uniform sampler2D uTex;
void main(void) {
oFragColor = texture(uTex, vTexCoord);
}
";
let fs = gl.create_shader(gl::FRAGMENT_SHADER);
gl.shader_source(fs, &[version.as_bytes(), frag_source.as_bytes()]);
gl.compile_shader(fs);
let prog = gl.create_program();
gl.attach_shader(prog, vs);
gl.attach_shader(prog, fs);
gl.bind_attrib_location(prog, 0, "aVert");
gl.link_program(prog);
let mut status = [0];
unsafe {
gl.get_program_iv(prog, gl::LINK_STATUS, &mut status);
}
assert!(status[0] != 0);
//println!("vert: {}", gl.get_shader_info_log(vs));
//println!("frag: {}", gl.get_shader_info_log(fs));
//println!("status: {}, {}", status[0], gl.get_program_info_log(prog));
gl.use_program(prog);
let dest_matrix_loc = gl.get_uniform_location(prog, "uDestMatrix");
assert!(dest_matrix_loc != -1);
let tex_matrix_loc = gl.get_uniform_location(prog, "uTexMatrix");
assert!(tex_matrix_loc != -1);
let tex_loc = gl.get_uniform_location(prog, "uTex");
assert!(tex_loc != -1);
gl.uniform_1i(tex_loc, 0);
gl.use_program(0);
gl.delete_shader(vs);
gl.delete_shader(fs);
DrawTileHelper {
gl,
prog,
quad_vao,
quad_vbo,
dest_matrix_loc,
tex_matrix_loc,
}
}
fn deinit(&self) {
self.gl.delete_program(self.prog);
self.gl.delete_vertex_arrays(&[self.quad_vao]);
self.gl.delete_buffers(&[self.quad_vbo]);
}
fn enable(&self, viewport: &DeviceIntRect) {
self.gl.viewport(
viewport.origin.x,
viewport.origin.y,
viewport.size.width,
viewport.size.height,
);
self.gl.bind_vertex_array(self.quad_vao);
self.gl.use_program(self.prog);
self.gl.active_texture(gl::TEXTURE0);
}
fn draw(
&self,
viewport: &DeviceIntRect,
dest: &DeviceIntRect,
src: &DeviceIntRect,
_clip: &DeviceIntRect,
surface: &SwSurface,
tile: &SwTile,
flip_y: bool,
filter: GLenum,
) {
let dx = dest.origin.x as f32 / viewport.size.width as f32;
let dy = dest.origin.y as f32 / viewport.size.height as f32;
let dw = dest.size.width as f32 / viewport.size.width as f32;
let dh = dest.size.height as f32 / viewport.size.height as f32;
self.gl.uniform_matrix_3fv(
self.dest_matrix_loc,
false,
&[
2.0 * dw,
0.0,
0.0,
0.0,
if flip_y { 2.0 * dh } else { -2.0 * dh },
0.0,
-1.0 + 2.0 * dx,
if flip_y { -1.0 + 2.0 * dy } else { 1.0 - 2.0 * dy },
1.0,
],
);
let sx = src.origin.x as f32 / surface.tile_size.width as f32;
let sy = src.origin.y as f32 / surface.tile_size.height as f32;
let sw = src.size.width as f32 / surface.tile_size.width as f32;
let sh = src.size.height as f32 / surface.tile_size.height as f32;
self.gl
.uniform_matrix_3fv(self.tex_matrix_loc, false, &[sw, 0.0, 0.0, 0.0, sh, 0.0, sx, sy, 1.0]);
self.gl.bind_texture(gl::TEXTURE_2D, tile.tex_id);
self.gl
.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, filter as gl::GLint);
self.gl
.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, filter as gl::GLint);
self.gl.draw_arrays(gl::TRIANGLE_STRIP, 0, 4);
}
fn disable(&self) {
self.gl.use_program(0);
self.gl.bind_vertex_array(0);
}
}
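// Aside: uDestMatrix in draw() above is a column-major 3x3 affine transform
// that places the unit quad at the destination rectangle in clip space. The
// following standalone helper is a hypothetical illustration of the same
// construction, not code from this commit.
fn dest_matrix(dx: f32, dy: f32, dw: f32, dh: f32, flip_y: bool) -> [f32; 9] {
    [
        // column 0: x scale from quad space [0, 1] to the rect's NDC width
        2.0 * dw,
        0.0,
        0.0,
        // column 1: y scale; negated by default because NDC y points up while
        // the destination rect is specified top-down
        0.0,
        if flip_y { 2.0 * dh } else { -2.0 * dh },
        0.0,
        // column 2: translation placing the rect's origin in NDC [-1, 1]
        -1.0 + 2.0 * dx,
        if flip_y { -1.0 + 2.0 * dy } else { 1.0 - 2.0 * dy },
        1.0,
    ]
}
// With flip_y = false, quad corner (0, 0) lands at the rect's top-left in NDC
// and (1, 1) at its bottom-right; flip_y mirrors the rect vertically.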
/// A source for a composite job which can either be a single BGRA locked SWGL
/// resource or a collection of SWGL resources representing a YUV surface.
#[derive(Clone)]
@@ -693,6 +865,7 @@ type FrameSurface = (
/// WebRender and the RenderCompositor via the Compositor API.
pub struct SwCompositor {
gl: swgl::Context,
native_gl: Option<Rc<dyn gl::Gl>>,
compositor: Box<dyn MappableCompositor>,
use_native_compositor: bool,
surfaces: HashMap<NativeSurfaceId, SwSurface>,
@@ -702,6 +875,7 @@ pub struct SwCompositor {
/// store them in a separate queue that gets processed later.
late_surfaces: Vec<FrameSurface>,
cur_tile: NativeTileId,
draw_tile: Option<DrawTileHelper>,
/// The maximum tile size required for any of the allocated surfaces.
max_tile_size: DeviceIntSize,
/// Reuse the same depth texture amongst all tiles in all surfaces.
@@ -710,7 +884,7 @@ pub struct SwCompositor {
/// to ensure that this depth texture is at least that big.
depth_id: u32,
/// Instance of the SwComposite thread, only created if we are not relying
/// on a native RenderCompositor.
/// on OpenGL compositing or a native RenderCompositor.
composite_thread: Option<Arc<SwCompositeThread>>,
/// SWGL locked resource for sharing framebuffer with SwComposite thread
locked_framebuffer: Option<swgl::LockedResource>,
@@ -719,14 +893,16 @@ impl SwCompositor {
impl SwCompositor {
pub fn new(
gl: swgl::Context,
native_gl: Option<Rc<dyn gl::Gl>>,
compositor: Box<dyn MappableCompositor>,
use_native_compositor: bool,
) -> Self {
let depth_id = gl.gen_textures(1)[0];
// Only create the SwComposite thread if we're not using a native render
// compositor. Thus, we are compositing into the main software framebuffer,
// which benefits from compositing asynchronously while updating tiles.
let composite_thread = if !use_native_compositor {
// Only create the SwComposite thread if we're neither using OpenGL composition nor a native
// render compositor. Thus, we are compositing into the main software framebuffer, which in
// that case benefits from compositing asynchronously while we are updating tiles.
assert!(native_gl.is_none() || !use_native_compositor);
let composite_thread = if native_gl.is_none() && !use_native_compositor {
Some(SwCompositeThread::new())
} else {
None
@@ -743,6 +919,8 @@ impl SwCompositor {
x: 0,
y: 0,
},
draw_tile: native_gl.as_ref().map(|gl| DrawTileHelper::new(gl.clone())),
native_gl,
max_tile_size: DeviceIntSize::zero(),
depth_id,
composite_thread,
@@ -750,9 +928,20 @@ impl SwCompositor {
}
}
fn deinit_shader(&mut self) {
if let Some(draw_tile) = &self.draw_tile {
draw_tile.deinit();
}
self.draw_tile = None;
}
fn deinit_tile(&self, tile: &SwTile) {
self.gl.delete_framebuffers(&[tile.fbo_id]);
self.gl.delete_textures(&[tile.color_id]);
if let Some(native_gl) = &self.native_gl {
native_gl.delete_textures(&[tile.tex_id]);
native_gl.delete_buffers(&[tile.pbo_id]);
}
}
fn deinit_surface(&self, surface: &SwSurface) {
@@ -1162,6 +1351,8 @@ impl Compositor for SwCompositor {
self.gl.delete_textures(&[self.depth_id]);
self.deinit_shader();
if self.use_native_compositor {
self.compositor.deinit();
}
@@ -1192,6 +1383,37 @@ impl Compositor for SwCompositor {
);
self.gl.bind_framebuffer(gl::DRAW_FRAMEBUFFER, 0);
if let Some(native_gl) = &self.native_gl {
tile.tex_id = native_gl.gen_textures(1)[0];
native_gl.bind_texture(gl::TEXTURE_2D, tile.tex_id);
native_gl.tex_image_2d(
gl::TEXTURE_2D,
0,
gl::RGBA8 as gl::GLint,
surface.tile_size.width,
surface.tile_size.height,
0,
gl::RGBA,
gl::UNSIGNED_BYTE,
None,
);
native_gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::GLint);
native_gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as gl::GLint);
native_gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as gl::GLint);
native_gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as gl::GLint);
native_gl.bind_texture(gl::TEXTURE_2D, 0);
tile.pbo_id = native_gl.gen_buffers(1)[0];
native_gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, tile.pbo_id);
native_gl.buffer_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
surface.tile_size.area() as isize * 4,
ptr::null(),
gl::DYNAMIC_DRAW,
);
native_gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, 0);
}
surface.tiles.push(tile);
}
}
@@ -1258,6 +1480,23 @@ impl Compositor for SwCompositor {
stride = tile_info.stride;
buf = tile_info.data;
}
} else if let Some(native_gl) = &self.native_gl {
if tile.pbo_id != 0 {
native_gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, tile.pbo_id);
buf = native_gl.map_buffer_range(
gl::PIXEL_UNPACK_BUFFER,
0,
valid_rect.size.area() as isize * 4,
gl::MAP_WRITE_BIT | gl::MAP_INVALIDATE_BUFFER_BIT,
); // | gl::MAP_UNSYNCHRONIZED_BIT);
if buf != ptr::null_mut() {
stride = valid_rect.size.width * 4;
} else {
native_gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, 0);
native_gl.delete_buffers(&[tile.pbo_id]);
tile.pbo_id = 0;
}
}
}
self.gl.set_texture_buffer(
tile.color_id,
@@ -1301,23 +1540,64 @@ impl Compositor for SwCompositor {
if let Some(surface) = self.surfaces.get(&id.surface_id) {
if let Some(tile) = surface.tiles.iter().find(|t| t.x == id.x && t.y == id.y) {
if tile.valid_rect.is_empty() {
// If we didn't actually render anything, then just queue any
// dependencies.
self.flush_composites(&id, surface, tile);
return;
}
// Force any delayed clears to be resolved.
self.gl.resolve_framebuffer(tile.fbo_id);
// get the color buffer even if we have a self.compositor, to make
// sure that any delayed clears are resolved
let (swbuf, _, _, stride) = self.gl.get_color_buffer(tile.fbo_id, true);
if self.use_native_compositor {
self.compositor.unmap_tile();
} else {
// If we're not relying on a native compositor, then composite
// any tiles that are dependent on this tile being updated but
// are otherwise ready to composite.
self.flush_composites(&id, surface, tile);
return;
}
let native_gl = match &self.native_gl {
Some(native_gl) => native_gl,
None => {
// If we're not relying on a native compositor or OpenGL compositing,
// then composite any tiles that are dependent on this tile being
// updated but are otherwise ready to composite.
self.flush_composites(&id, surface, tile);
return;
}
};
assert!(stride % 4 == 0);
let buf = if tile.pbo_id != 0 {
native_gl.unmap_buffer(gl::PIXEL_UNPACK_BUFFER);
std::ptr::null_mut::<c_void>()
} else {
swbuf
};
let dirty = tile.dirty_rect;
let src = unsafe {
(buf as *mut u32).offset(
(dirty.origin.y - tile.valid_rect.origin.y) as isize * (stride / 4) as isize
+ (dirty.origin.x - tile.valid_rect.origin.x) as isize,
)
};
native_gl.active_texture(gl::TEXTURE0);
native_gl.bind_texture(gl::TEXTURE_2D, tile.tex_id);
native_gl.pixel_store_i(gl::UNPACK_ROW_LENGTH, stride / 4);
native_gl.tex_sub_image_2d_pbo(
gl::TEXTURE_2D,
0,
dirty.origin.x,
dirty.origin.y,
dirty.size.width,
dirty.size.height,
gl::BGRA,
gl::UNSIGNED_BYTE,
src as _,
);
native_gl.pixel_store_i(gl::UNPACK_ROW_LENGTH, 0);
if tile.pbo_id != 0 {
native_gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, 0);
}
native_gl.bind_texture(gl::TEXTURE_2D, 0);
}
}
}
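// Aside (not part of this commit): the `src` computation above skips from the
// tile's valid-rect origin to its dirty-rect origin inside a buffer laid out
// with `stride` bytes per row; UNPACK_ROW_LENGTH = stride / 4 then tells GL
// how many pixels separate successive rows. A hedged standalone version of
// that arithmetic, with `dirty_rect_src` being a hypothetical helper:
fn dirty_rect_src(
    buf: *mut u32,
    stride_bytes: i32,
    valid_origin: (i32, i32),
    dirty_origin: (i32, i32),
) -> *mut u32 {
    assert!(stride_bytes % 4 == 0);
    let pixels_per_row = (stride_bytes / 4) as isize;
    let dx = (dirty_origin.0 - valid_origin.0) as isize;
    let dy = (dirty_origin.1 - valid_origin.1) as isize;
    // Skip `dy` whole rows, then `dx` pixels into the row.
    unsafe { buf.offset(dy * pixels_per_row + dx) }
}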
@@ -1435,6 +1715,45 @@ impl Compositor for SwCompositor {
fn end_frame(&mut self) {
if self.use_native_compositor {
self.compositor.end_frame();
} else if let Some(native_gl) = &self.native_gl {
let (_, fw, fh, _) = self.gl.get_color_buffer(0, false);
let viewport = DeviceIntRect::from_size(DeviceIntSize::new(fw, fh));
let draw_tile = self.draw_tile.as_ref().unwrap();
draw_tile.enable(&viewport);
let mut blend = false;
native_gl.blend_func(gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
for &(ref id, ref transform, ref clip_rect, filter) in &self.frame_surfaces {
if let Some(surface) = self.surfaces.get(id) {
if surface.is_opaque {
if blend {
native_gl.disable(gl::BLEND);
blend = false;
}
} else if !blend {
native_gl.enable(gl::BLEND);
blend = true;
}
for tile in &surface.tiles {
if let Some((src_rect, dst_rect, flip_y)) = tile.composite_rects(surface, transform, clip_rect)
{
draw_tile.draw(
&viewport,
&dst_rect,
&src_rect,
clip_rect,
surface,
tile,
flip_y,
image_rendering_to_gl_filter(filter),
);
}
}
}
}
if blend {
native_gl.disable(gl::BLEND);
}
draw_tile.disable();
} else if let Some(ref composite_thread) = self.composite_thread {
// Need to wait for the SwComposite thread to finish any queued jobs.
composite_thread.wait_for_composites(false);